diff --git a/.cargo/config.toml b/.cargo/config.toml index a6d413812c5f4..2c41ca0110f64 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -37,7 +37,6 @@ rustflags = [ # uncomment the following two lines to enable `TaskLocalAlloc` # "--cfg", # "enable_task_local_alloc", - ] [unstable] diff --git a/.config/hakari.toml b/.config/hakari.toml index 9cdfbb3b0c07f..c1355e0892334 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -16,13 +16,16 @@ resolver = "2" # https://doc.rust-lang.org/rustc/platform-support.html platforms = [ # "x86_64-unknown-linux-gnu", + # "aarch64-unknown-linux-gnu", # "x86_64-apple-darwin", - # "x86_64-pc-windows-msvc", + # "aarch64-apple-darwin", ] # Write out exact versions rather than a semver range. (Defaults to false.) # exact-versions = true +unify-target-host = "unify-if-both" + [traversal-excludes] workspace-members = [ "workspace-config", @@ -31,6 +34,9 @@ workspace-members = [ ] third-party = [ { name = "opendal" }, + # For some reasons, tikv-jemalloc-sys would be compiled twice if being added into `workspace-hack` + { name = "tikv-jemalloc-sys", git = "https://github.com/risingwavelabs/jemallocator.git", rev = "64a2d9" }, + { name = "tikv-jemallocator", git = "https://github.com/risingwavelabs/jemallocator.git", rev = "64a2d9" }, # These are solely dev-dependencies. Unifying them may slow down build. 
{ name = "criterion" }, { name = "console" }, diff --git a/.dockerignore b/.dockerignore index 984013bc97e2a..aa0493b541e49 100644 --- a/.dockerignore +++ b/.dockerignore @@ -48,3 +48,6 @@ risedev-components.user.env riselab-components.user.env .git/ + +Dockerfile +.dockerignore diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index bae692a579559..c3a80429ee84d 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -33,3 +33,6 @@ f8266748dcb70541da944664552c1944ff8362e4 # feat(risedev): add check for trailing spaces in `risedev check` (#11294) f2a3fd021059e680b35b24c63cff5f8dbe9f9d5f + +# chore(rustfmt): format let-chains and let-else #9409 +d70dba827c303373f3220c9733f7c7443e5c2d37 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 235fb3bbacfbb..acd75f253b699 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,5 +3,5 @@ contact_links: url: https://github.com/risingwavelabs/risingwave/discussions about: Have questions? Welcome to open a discussion. - name: Community Chat - url: https://join.slack.com/t/risingwave-community/shared_invite/zt-120rft0mr-d8uGk3d~NZiZAQWPnElOfw + url: https://risingwave.com/slack about: Join the RisingWave Slack community and chat with us. diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2dd2d9347b96b..b2d58279b5290 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,7 +7,10 @@ updates: open-pull-requests-limit: 10 # Disable auto rebase to reduce cost. Use `@dependabot rebase` manually instead. rebase-strategy: "disabled" - + # Ignore patch to reduce spam. Manually run `cargo update` regularly instead. 
+ ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] # Create a group of dependencies to be updated together in one pull request groups: arrow: @@ -16,3 +19,7 @@ updates: aws: patterns: - "aws*" + tonic: + patterns: + - "tonic*" + - "prost*" diff --git a/.github/workflows/auto-create-doc-issue-by-issue.yml b/.github/workflows/auto-create-doc-issue-by-issue.yml new file mode 100644 index 0000000000000..0c8d78062977a --- /dev/null +++ b/.github/workflows/auto-create-doc-issue-by-issue.yml @@ -0,0 +1,31 @@ +name: Issue Documentation Checker + +on: + issues: + types: + - closed + - labeled + +jobs: + create-issue: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Log the event payload + run: echo "${{ toJSON(github.event) }}" + - name: Check if issue is done and labeled 'user-facing-changes' + uses: dacbd/create-issue-action@main + if: ${{ github.event.action == 'closed' && contains(github.event.issue.labels.*.name, 'user-facing-changes') }} + with: + token: ${{ secrets.ACCESS_TOKEN }} + owner: risingwavelabs + repo: risingwave-docs + title: | + Document: ${{ github.event.issue.title }} + body: | + ## Context + Source Issue URL: ${{ github.event.issue.html_url }} + Created At: ${{ github.event.issue.created_at }} + Created By: ${{ github.event.issue.user.login }} + Closed At: ${{ github.event.issue.closed_at }} diff --git a/.github/workflows/auto-create-docs-pr.yml b/.github/workflows/auto-create-doc-issue-by-pr.yml similarity index 100% rename from .github/workflows/auto-create-docs-pr.yml rename to .github/workflows/auto-create-doc-issue-by-pr.yml diff --git a/.github/workflows/cherry-pick-to-release-branch.yml b/.github/workflows/cherry-pick-to-release-branch.yml index e98e1769630b9..026b2313d8353 100644 --- a/.github/workflows/cherry-pick-to-release-branch.yml +++ b/.github/workflows/cherry-pick-to-release-branch.yml @@ -6,8 +6,8 @@ on: types: ["closed", "labeled"] jobs: - release_pull_request_1_1: - if: 
"contains(github.event.pull_request.labels.*.name, 'need-cherry-pick-v1.1') && github.event.pull_request.merged == true" + release_pull_request_1_3: + if: "contains(github.event.pull_request.labels.*.name, 'need-cherry-pick-v1.3') && github.event.pull_request.merged == true" runs-on: ubuntu-latest name: release_pull_request steps: @@ -16,9 +16,9 @@ jobs: - name: Create PR to branch uses: risingwavelabs/github-action-cherry-pick@master with: - pr_branch: 'v1.1-rc' + pr_branch: 'v1.3-rc' pr_labels: 'cherry-pick' - pr_body: ${{ format('Cherry picking \#{0} onto branch v1.1-rc', github.event.number) }} + pr_body: ${{ format('Cherry picking \#{0} onto branch v1.3-rc', github.event.number) }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 66f740666e2a7..3e181eda27fce 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -14,6 +14,16 @@ jobs: build: runs-on: ubuntu-latest steps: + - name: Maximize build space + uses: easimon/maximize-build-space@master + with: + remove-dotnet: 'true' + remove-android: 'true' + remove-haskell: 'true' + remove-codeql: 'true' + remove-docker-images: 'true' + root-reserve-mb: 10240 + temp-reserve-mb: 10240 - uses: actions/checkout@v3 - name: Setup Rust toolchain run: rustup show @@ -30,6 +40,8 @@ jobs: mkdir artifact cp -R target/doc/* artifact + - name: Show available storage + run: df -h - name: Install cargo-docset uses: taiki-e/install-action@v2 with: @@ -49,7 +61,8 @@ jobs: uses: actions/upload-pages-artifact@v1 with: path: artifact - + - name: Show available storage + run: df -h deploy: needs: build permissions: diff --git a/.github/workflows/hakari_fix.yml b/.github/workflows/hakari_fix.yml index 670ca38cccc27..b8ded582c36e9 100644 --- a/.github/workflows/hakari_fix.yml +++ b/.github/workflows/hakari_fix.yml @@ -15,7 +15,8 @@ jobs: steps: - uses: actions/checkout@v3 with: - ref: ${{ github.head_ref }} + ref: ${{ 
github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Install cargo-hakari uses: taiki-e/install-action@v2 diff --git a/.github/workflows/nightly-rust.yml b/.github/workflows/nightly-rust.yml index e6afb6970daec..5219b4805c74d 100644 --- a/.github/workflows/nightly-rust.yml +++ b/.github/workflows/nightly-rust.yml @@ -19,7 +19,15 @@ jobs: remove-haskell: 'true' remove-codeql: 'true' remove-docker-images: 'true' + root-reserve-mb: 2048 - uses: actions/checkout@v3 + if: ${{ github.event_name == 'schedule' }} + with: + # For daily scheduled run, we use a fixed branch, so that we can apply patches to fix compile errors earlier. + # We can also ensure the regression is due to new rust instead of new RisingWave code. + ref: xxchan/latest-nightly-rust + - uses: actions/checkout@v3 + if: ${{ !(github.event_name == 'schedule') }} - name: Setup Rust toolchain run: | rustup override set nightly diff --git a/.gitignore b/.gitignore index 19fb6643dd8a6..375738f67093e 100644 --- a/.gitignore +++ b/.gitignore @@ -74,4 +74,7 @@ simulation-it-test.tar.zst # hummock-trace .trace +# spark binary +e2e_test/iceberg/spark-*-bin* + **/poetry.lock \ No newline at end of file diff --git a/.licenserc.yaml b/.licenserc.yaml index 43e9315437ef8..c1745a4d1ad74 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -5,9 +5,10 @@ header: paths: - "src/**/*.rs" + - "src/**/*.py" + - "src/**/*.html" - "dashboard/**/*.js" - "dashboard/**/*.ts" - - "src/**/*.html" - "java/**/*.java" - "java/**/*.py" @@ -16,5 +17,6 @@ header: - "**/*.d.ts" - "src/sqlparser/**/*.rs" - "java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/*.java" + - "src/meta/src/model_v2/migration/**/*.rs" comment: on-failure diff --git a/.vscode/launch.json.example b/.vscode/launch.json.example index 6f8fbb18d4fe7..1748d2b179ea3 100644 --- a/.vscode/launch.json.example +++ b/.vscode/launch.json.example @@ -13,6 +13,21 @@ ], 
"cwd": "${workspaceRoot}", "preLaunchTask": "build rw bin" + }, + { + "name": "Open playground coredump", + "type": "lldb", + "request": "custom", + "targetCreateCommands": [ + "target create ${workspaceFolder}/target/debug/risingwave --core ${input:coreFileName}" + ], + } + ], + "inputs": [ + { + "id": "coreFileName", + "type": "promptString", + "description": "Enter core file path" } ] -} \ No newline at end of file +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9b519c16010ba..c0b3991fc1f61 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ Thanks for your interest in contributing to RisingWave! We welcome and appreciat This document describes how to submit your code changes. To learn about the development process, see the [developer guide](docs/developer-guide.md). To understand the design and implementation of RisingWave, refer to the design docs listed in [docs/README.md](docs/README.md). -If you have questions, you can search for existing discussions or start a new discussion in the [Discussions forum of RisingWave](https://github.com/risingwavelabs/risingwave/discussions), or ask in the RisingWave Community channel on Slack. Please use the [invitation link](https://join.slack.com/t/risingwave-community/shared_invite/zt-120rft0mr-d8uGk3d~NZiZAQWPnElOfw) to join the channel. +If you have questions, you can search for existing discussions or start a new discussion in the [Discussions forum of RisingWave](https://github.com/risingwavelabs/risingwave/discussions), or ask in the RisingWave Community channel on Slack. Please use the [invitation link](https://risingwave.com/slack) to join the channel. To report bugs, create a [GitHub issue](https://github.com/risingwavelabs/risingwave/issues/new/choose). 
diff --git a/Cargo.lock b/Cargo.lock index 9ba9676f4299a..5349b74d86f4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,13 +58,19 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.5" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] +[[package]] +name = "aliasable" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -113,15 +119,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" version = "0.5.0" @@ -187,7 +184,7 @@ checksum = "9c0fdddc3fdac97394ffcc5c89c634faa9c1c166ced54189af34e407c97b6ee7" dependencies = [ "apache-avro-derive", "byteorder", - "digest 0.10.7", + "digest", "lazy_static", "libflate", "log", @@ -213,7 +210,7 @@ dependencies = [ "byteorder", "bzip2", "crc32fast", - "digest 0.10.7", + "digest", "lazy_static", "libflate", "log", @@ -244,14 +241,14 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "a2e1373abdaa212b704512ec2bd8b26bd0b7d5c3f70117411a5d9a451383c859" [[package]] name = "arc-swap" @@ -267,9 +264,9 @@ 
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "arrow-arith" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895263144bd4a69751cbe6a34a53f26626e19770b313a9fa792c415cd0e78f11" +checksum = "bc1d4e368e87ad9ee64f28b9577a3834ce10fe2703a26b28417d485bbbdff956" dependencies = [ "arrow-array", "arrow-buffer", @@ -282,9 +279,9 @@ dependencies = [ [[package]] name = "arrow-array" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "226fdc6c3a4ae154a74c24091d36a90b514f0ed7112f5b8322c1d8f354d8e20d" +checksum = "d02efa7253ede102d45a4e802a129e83bcc3f49884cab795b1ac223918e4318d" dependencies = [ "ahash 0.8.3", "arrow-buffer", @@ -298,9 +295,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4843af4dd679c2f35b69c572874da8fde33be53eb549a5fb128e7a4b763510" +checksum = "fda119225204141138cb0541c692fbfef0e875ba01bfdeaed09e9d354f9d6195" dependencies = [ "bytes", "half 2.3.1", @@ -309,9 +306,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e8b9990733a9b635f656efda3c9b8308c7a19695c9ec2c7046dd154f9b144b" +checksum = "1d825d51b9968868d50bc5af92388754056796dbc62a4e25307d588a1fc84dee" dependencies = [ "arrow-array", "arrow-buffer", @@ -326,9 +323,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da900f31ff01a0a84da0572209be72b2b6f980f3ea58803635de47913191c188" +checksum = "475a4c3699c8b4095ca61cecf15da6f67841847a5f5aac983ccb9a377d02f73a" dependencies = [ "arrow-buffer", "arrow-schema", @@ -338,29 +335,29 @@ dependencies = [ [[package]] name = 
"arrow-flight" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e40372d37a860a742f248d4d7c137950cd793f1b46f2b99a5116c55efbe2699f" +checksum = "cd938ea4a0e8d0db2b9f47ebba792f73f6188f4289707caeaf93a3be705e5ed5" dependencies = [ "arrow-array", "arrow-buffer", "arrow-cast", "arrow-ipc", "arrow-schema", - "base64 0.21.3", + "base64 0.21.4", "bytes", "futures", "paste", - "prost", + "prost 0.12.1", "tokio", - "tonic", + "tonic 0.10.2", ] [[package]] name = "arrow-ipc" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2707a8d7ee2d345d045283ece3ae43416175873483e5d96319c929da542a0b1f" +checksum = "1248005c8ac549f869b7a840859d942bf62471479c1a2d82659d453eebcd166a" dependencies = [ "arrow-array", "arrow-buffer", @@ -370,11 +367,26 @@ dependencies = [ "flatbuffers", ] +[[package]] +name = "arrow-ord" +version = "47.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03b87aa408ea6a6300e49eb2eba0c032c88ed9dc19e0a9948489c55efdca71f4" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "half 2.3.1", + "num", +] + [[package]] name = "arrow-row" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e32afc1329f7b372463b21c6ca502b07cf237e1ed420d87706c1770bb0ebd38" +checksum = "114a348ab581e7c9b6908fcab23cb39ff9f060eb19e72b13f8fb8eaa37f65d22" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -387,16 +399,17 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b104f5daa730f00fde22adc03a12aa5a2ae9ccbbf99cbd53d284119ddc90e03d" +checksum = "5d1d179c117b158853e0101bfbed5615e86fe97ee356b4af901f1c5001e1ce4b" [[package]] name = "arrow-select" -version = "46.0.0" +version = "47.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b3ca55356d1eae07cf48808d8c462cea674393ae6ad1e0b120f40b422eb2b4" +checksum = "d5c71e003202e67e9db139e5278c79f5520bb79922261dfe140e4637ee8b6108" dependencies = [ + "ahash 0.8.3", "arrow-array", "arrow-buffer", "arrow-data", @@ -410,6 +423,16 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "async-channel" version = "1.9.0" @@ -434,6 +457,36 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-executor" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +dependencies = [ + "async-lock", + "async-task", + "concurrent-queue", + "fastrand 1.9.0", + "futures-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", + "tokio", +] + [[package]] name = "async-io" version = "1.13.0" @@ -465,15 +518,14 @@ dependencies = [ [[package]] name = "async-nats" -version = "0.31.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8257238e2a3629ee5618502a75d1b91f8017c24638c75349fc8d2d80cf1f7c4c" +checksum = "0e45b67ea596bb94741ef15ba1d90b72c92bdc07553d8033734cb620a2b39f1c" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "futures", "http", - "itoa", 
"memchr", "nkeys", "nuid", @@ -481,6 +533,7 @@ dependencies = [ "rand", "regex", "ring", + "rustls 0.21.7", "rustls-native-certs", "rustls-pemfile", "rustls-webpki 0.101.4", @@ -505,7 +558,34 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", ] [[package]] @@ -527,7 +607,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -544,7 +624,16 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", ] [[package]] @@ -554,15 +643,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" [[package]] -name = "atty" -version = "0.2.14" +name = "atomic-waker" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] +checksum = 
"1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "auto_enums" @@ -573,7 +657,7 @@ dependencies = [ "derive_utils", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -836,7 +920,7 @@ dependencies = [ "once_cell", "percent-encoding", "regex", - "sha2 0.10.7", + "sha2", "time", "tracing", ] @@ -870,7 +954,7 @@ dependencies = [ "md-5", "pin-project-lite", "sha1", - "sha2 0.10.7", + "sha2", "tracing", ] @@ -1103,9 +1187,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64-simd" @@ -1117,6 +1201,15 @@ dependencies = [ "vsimd", ] +[[package]] +name = "base64-url" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c5b0a88aa36e9f095ee2e2b13fb8c5e4313e022783aedacc123328c0084916d" +dependencies = [ + "base64 0.21.4", +] + [[package]] name = "base64ct" version = "1.6.0" @@ -1201,6 +1294,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + [[package]] name = "bit-vec" version = "0.6.3" @@ -1218,6 +1320,9 @@ name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +dependencies = [ + "serde", +] [[package]] name = "bitmaps" @@ -1249,20 +1354,26 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.9.0" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-buffer" -version = "0.10.4" +name = "blocking" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ - "generic-array", + "async-channel", + "async-lock", + "async-task", + "atomic-waker", + "fastrand 1.9.0", + "futures-lite", + "log", ] [[package]] @@ -1382,9 +1493,9 @@ checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -1568,20 +1679,20 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg-or-panic" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf85d5384815558275789d91d1895d1d9919a6e2534d6144650f036ac65691a6" +checksum = "bc7cb2538d4ecc42b6c3b57a83094d8c69894e74468d18cd045a09fdea807358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1656,24 +1767,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.32.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.7.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "clap" -version = "4.4.2" +version = "4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" +checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136" dependencies = [ "clap_builder", "clap_derive", @@ -1681,14 +1777,14 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.10.0", + "strsim", ] [[package]] @@ -1700,7 +1796,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -1762,9 +1858,9 @@ dependencies = [ [[package]] name = "cmsketch" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467e460587e81453bf9aeb43cd534e9c5ad670042023bd6c3f377c23b76cc2f0" +checksum = "93710598b87c37ea250ab17a36f9f79dbaf3bd20e55806cf09345103bc26d60e" dependencies = [ "paste", ] @@ -1804,7 +1900,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ "bytes", + "futures-core", "memchr", + "pin-project-lite", + "tokio", + "tokio-util", ] [[package]] @@ -1843,46 +1943,41 @@ dependencies = [ [[package]] name = "console-api" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e" +checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787" dependencies = [ - "prost", - "prost-types", - "tonic", + "futures-core", + "prost 0.12.1", + "prost-types 0.12.1", + "tonic 0.10.2", "tracing-core", ] [[package]] name = "console-subscriber" -version = "0.1.10" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" +checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e" dependencies = [ "console-api", "crossbeam-channel", "crossbeam-utils", - "futures", + "futures-task", "hdrhistogram", - "humantime 2.1.0", - "prost-types", + "humantime", + "prost-types 0.12.1", "serde", "serde_json", "thread_local", "tokio", "tokio-stream", - "tonic", + "tonic 0.10.2", "tracing", "tracing-core", "tracing-subscriber", ] -[[package]] -name = "const-oid" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" - [[package]] name = "const-oid" version = "0.9.5" @@ -1917,6 +2012,15 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -1962,18 +2066,18 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c289b8eac3a97329a524e953b5fd68a8416ca629e1a37287f12d9e0760aadbc" +checksum = 
"7aae6f552c4c0ccfb30b9559b77bc985a387d998e1736cbbe6b14c903f3656cf" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf07ba80f53fa7f7dc97b11087ea867f7ae4621cfca21a909eca92c0b96c7d9" +checksum = "95551de96900cefae691ce895ff2abc691ae3a0b97911a76b45faf99e432937b" dependencies = [ "bumpalo", "cranelift-bforest", @@ -1992,42 +2096,42 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a7ca088173130c5c033e944756e3e441fbf3f637f32b4f6eb70252580c6dd4" +checksum = "36a3ad7b2bb03de3383f258b00ca29d80234bebd5130cb6ef3bae37ada5baab0" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0114095ec7d2fbd658ed100bd007006360bc2530f57c6eee3d3838869140dbf9" +checksum = "915918fee4142c85fb04bafe0bcd697e2fd6c15a260301ea6f8d2ea332a30e86" [[package]] name = "cranelift-control" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d56031683a55a949977e756d21826eb17a1f346143a1badc0e120a15615cd38" +checksum = "37e447d548cd7f4fcb87fbd10edbd66a4f77966d17785ed50a08c8f3835483c8" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6565198b5684367371e2b946ceca721eb36965e75e3592fad12fc2e15f65d7b" +checksum = "9d8ab3352a1e5966968d7ab424bd3de8e6b58314760745c3817c2eec3fa2f918" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"25f28cc44847c8b98cb921e6bfc0f7b228f4d27519376fea724d181da91709a6" +checksum = "1bffa38431f7554aa1594f122263b87c9e04abc55c9f42b81d37342ac44f79f0" dependencies = [ "cranelift-codegen", "log", @@ -2037,15 +2141,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b658177e72178c438f7de5d6645c56d97af38e17fcb0b500459007b4e05cc5" +checksum = "84cef66a71c77938148b72bf006892c89d6be9274a08f7e669ff15a56145d701" [[package]] name = "cranelift-native" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf1c7de7221e6afcc5e13ced3b218faab3bc65b47eac67400046a05418aecd6a" +checksum = "f33c7e5eb446e162d2d10b17fe68e1f091020cc2e4e38b5501c21099600b0a1b" dependencies = [ "cranelift-codegen", "libc", @@ -2054,9 +2158,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.97.1" +version = "0.97.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76b0d28ebe8edb6b503630c489aa4669f1e2d13b97bec7271a0fcb0e159be3ad" +checksum = "632f7b64fa6a8c5b980eb6a17ef22089e15cb9f779f1ed3bd3072beab0686c09" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -2111,7 +2215,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2123,7 +2227,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.2", + "clap", "criterion-plot", "futures", "is-terminal", @@ -2278,9 +2382,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -2290,9 +2394,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version 
= "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] @@ -2304,20 +2408,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f34ba9a9bcb8645379e9de8cb3ecfcf4d1c85ba66d90deb3259206fa5aa193b" dependencies = [ "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "platforms", + "rustc_version", "subtle", - "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.37", ] [[package]] @@ -2344,7 +2462,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2361,7 +2479,7 @@ checksum = "2fa16a70dd58129e4dfffdff535fb1bce66673f7bbeec4a5a1765a504e1ccd84" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2404,7 +2522,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", + "strsim", "syn 1.0.109", ] @@ -2418,7 +2536,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", + "strsim", "syn 1.0.109", ] @@ -2432,8 +2550,8 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.32", + "strsim", + "syn 2.0.37", ] 
[[package]] @@ -2466,7 +2584,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2518,35 +2636,38 @@ dependencies = [ "uuid", ] -[[package]] -name = "der" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" -dependencies = [ - "const-oid 0.6.2", -] - [[package]] name = "der" version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ - "const-oid 0.9.5", - "pem-rfc7468 0.7.0", + "const-oid", + "pem-rfc7468", "zeroize", ] [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ + "powerfmt", "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_builder" version = "0.12.0" @@ -2586,18 +2707,19 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "dialoguer" -version = "0.10.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" dependencies = [ "console", "shell-words", "tempfile", + "thiserror", "zeroize", ] @@ 
-2613,23 +2735,14 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", - "const-oid 0.9.5", + "block-buffer", + "const-oid", "crypto-common", "subtle", ] @@ -2684,12 +2797,17 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dlv-list" version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aead04dc46b5f263c25721cf25c9e595951d15055f8063f92392fa0d7f64cf4" +source = "git+https://github.com/sgodwincs/dlv-list-rs.git?rev=5bbc5d0#5bbc5d0cc84f257e173d851f8dc1674fb6e46f95" dependencies = [ "const-random", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -2716,9 +2834,9 @@ dependencies = [ [[package]] name = "duration-str" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f037c488d179e21c87ef5fa9c331e8e62f5dddfa84618b41bb197da03edff1" +checksum = "5e172e85f305d6a442b250bf40667ffcb91a24f52c9a1ca59e2fa991ac9b7790" dependencies = [ "chrono", "nom", @@ -2730,9 +2848,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" +checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd" [[package]] name = "easy-ext" @@ -2742,23 +2860,23 @@ checksum = "49457524c7e65648794c98283282a0b7c73b10018e7091f1cdcfff314fd7ae59" [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ - "signature 1.6.4", + "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ "curve25519-dalek", "ed25519", - "sha2 0.9.9", - "zeroize", + "sha2", + "signature", ] [[package]] @@ -2778,6 +2896,9 @@ name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] [[package]] name = "encode_unicode" @@ -2803,7 +2924,27 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", +] + +[[package]] +name = "enum-display" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d4df33d54dd1959d177a0e2c2f4e5a8637a3054aa56861ed7e173ad2043fe2" +dependencies = [ + "enum-display-macro", +] + +[[package]] +name = "enum-display-macro" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0ce3a36047ede676eb0d2721d065beed8410cf4f113f489604d2971331cb378" +dependencies = [ + "convert_case", + "quote", + "syn 1.0.109", ] [[package]] @@ -2823,7 +2964,7 @@ checksum = 
"eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2836,20 +2977,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.32", -] - -[[package]] -name = "env_logger" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", + "syn 2.0.37", ] [[package]] @@ -2858,7 +2986,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ - "humantime 2.1.0", + "humantime", "is-terminal", "log", "regex", @@ -2903,28 +3031,39 @@ dependencies = [ [[package]] name = "etcd-client" -version = "0.11.1" -source = "git+https://github.com/risingwavelabs/etcd-client.git?rev=d55550a#d55550a182f2119e39e64858771468e1b26f6777" +version = "0.12.1" +source = "git+https://github.com/risingwavelabs/etcd-client.git?rev=4e84d40#4e84d40a84b35718d814cc2afccc9274c9d78e1e" dependencies = [ "http", - "prost", + "prost 0.12.1", "tokio", "tokio-stream", - "tonic", + "tonic 0.10.2", "tonic-build", "tower", "tower-service", ] [[package]] -name = "ethnum" -version = "1.4.0" +name = "etcetera" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8ff382b2fa527fb7fb06eeebfc5bbb3f17e3cc6b9d70b006c41daa8824adac" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "serde", -] - + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "ethnum" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8ff382b2fa527fb7fb06eeebfc5bbb3f17e3cc6b9d70b006c41daa8824adac" +dependencies = [ + "serde", +] + [[package]] name = "event-listener" 
version = "2.5.3" @@ -2958,6 +3097,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fancy-regex" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" +dependencies = [ + "bit-set", + "regex", +] + [[package]] name = "faster-hex" version = "0.8.1" @@ -2993,6 +3142,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "fiat-crypto" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" + [[package]] name = "fiemap" version = "0.1.1" @@ -3008,7 +3163,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a3cc21c33af89af0930c8cae4ade5e6fdc17b5d2c97b3d2e2edb67a1cf683f3" dependencies = [ - "env_logger 0.10.0", + "env_logger", "log", ] @@ -3081,6 +3236,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "flume" +version = "0.10.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3114,7 +3281,7 @@ dependencies = [ [[package]] name = "foyer" version = "0.1.0" -source = "git+https://github.com/mrcroxx/foyer?rev=2c6f080#2c6f080835f49fc6026b97fd53d389bae81e9361" +source = "git+https://github.com/MrCroxx/foyer?rev=2261151#2261151107ad362851f5fff9ce4fa56e61911b10" dependencies = [ "foyer-common", "foyer-intrusive", @@ -3125,27 +3292,28 @@ dependencies = [ [[package]] name = "foyer-common" version = "0.1.0" -source = "git+https://github.com/mrcroxx/foyer?rev=2c6f080#2c6f080835f49fc6026b97fd53d389bae81e9361" +source = 
"git+https://github.com/MrCroxx/foyer?rev=2261151#2261151107ad362851f5fff9ce4fa56e61911b10" dependencies = [ "bytes", "foyer-workspace-hack", + "itertools 0.11.0", + "madsim-tokio", "parking_lot 0.12.1", "paste", "rand", - "tokio", "tracing", ] [[package]] name = "foyer-intrusive" version = "0.1.0" -source = "git+https://github.com/mrcroxx/foyer?rev=2c6f080#2c6f080835f49fc6026b97fd53d389bae81e9361" +source = "git+https://github.com/MrCroxx/foyer?rev=2261151#2261151107ad362851f5fff9ce4fa56e61911b10" dependencies = [ "bytes", "cmsketch", "foyer-common", "foyer-workspace-hack", - "itertools 0.10.5", + "itertools 0.11.0", "memoffset 0.9.0", "parking_lot 0.12.1", "paste", @@ -3156,11 +3324,10 @@ dependencies = [ [[package]] name = "foyer-storage" version = "0.1.0" -source = "git+https://github.com/mrcroxx/foyer?rev=2c6f080#2c6f080835f49fc6026b97fd53d389bae81e9361" +source = "git+https://github.com/MrCroxx/foyer?rev=2261151#2261151107ad362851f5fff9ce4fa56e61911b10" dependencies = [ "anyhow", "async-channel", - "async-trait", "bitflags 2.4.0", "bitmaps", "bytes", @@ -3171,6 +3338,7 @@ dependencies = [ "futures", "itertools 0.11.0", "libc", + "madsim-tokio", "memoffset 0.9.0", "nix 0.27.1", "parking_lot 0.12.1", @@ -3178,7 +3346,6 @@ dependencies = [ "prometheus", "rand", "thiserror", - "tokio", "tracing", "twox-hash", ] @@ -3186,7 +3353,7 @@ dependencies = [ [[package]] name = "foyer-workspace-hack" version = "0.1.0" -source = "git+https://github.com/mrcroxx/foyer?rev=2c6f080#2c6f080835f49fc6026b97fd53d389bae81e9361" +source = "git+https://github.com/MrCroxx/foyer?rev=2261151#2261151107ad362851f5fff9ce4fa56e61911b10" dependencies = [ "crossbeam-utils", "either", @@ -3195,9 +3362,8 @@ dependencies = [ "futures-sink", "futures-util", "hyper", - "itertools 0.10.5", + "itertools 0.11.0", "libc", - "lock_api", "memchr", "parking_lot 0.12.1", "parking_lot_core 0.9.8", @@ -3205,9 +3371,9 @@ dependencies = [ "quote", "rand", "regex", - "regex-automata 0.3.8", - "regex-syntax 
0.7.5", - "syn 2.0.32", + "regex-automata 0.4.1", + "regex-syntax 0.8.0", + "syn 2.0.37", "tokio", "tracing", "tracing-core", @@ -3244,7 +3410,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3256,7 +3422,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3268,7 +3434,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3336,9 +3502,9 @@ dependencies = [ [[package]] name = "futures-async-stream" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f529ccdeacfa2446a9577041686cf1abb839b1b3e15fee4c1b1232ab3b7d799f" +checksum = "379790776b0d953337df4ab7ecc51936c66ea112484cad7912907b1d34253ebf" dependencies = [ "futures-async-stream-macro", "futures-core", @@ -3347,13 +3513,13 @@ dependencies = [ [[package]] name = "futures-async-stream-macro" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b48ee06dc8d2808ba5ebad075d06c3406085bb19deaac33be64c39113bf80" +checksum = "5df2c13d48c8cb8a3ec093ede6f0f4482f327d7bb781120c5fb483ef0f17e758" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.37", ] [[package]] @@ -3383,6 +3549,17 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.12.1", +] + [[package]] name = "futures-io" version = "0.3.28" @@ -3412,7 +3589,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] 
[[package]] @@ -3531,6 +3708,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "google-cloud-auth" version = "0.12.0" @@ -3538,7 +3727,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "931bedb2264cb00f914b0a6a5c304e34865c34306632d3932e0951a073e4a67d" dependencies = [ "async-trait", - "base64 0.21.3", + "base64 0.21.4", "google-cloud-metadata", "google-cloud-token", "home", @@ -3564,7 +3753,7 @@ dependencies = [ "thiserror", "tokio", "tokio-retry", - "tonic", + "tonic 0.9.2", "tower", "tracing", ] @@ -3575,9 +3764,9 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5453af21ac0cc1f3b2cfb5b687c174e701c10ec2d5c286aff7ca8cbbf08d31b4" dependencies = [ - "prost", - "prost-types", - "tonic", + "prost 0.11.9", + "prost-types 0.11.9", + "tonic 0.9.2", ] [[package]] @@ -3603,7 +3792,7 @@ dependencies = [ "google-cloud-gax", "google-cloud-googleapis", "google-cloud-token", - "prost-types", + "prost-types 0.11.9", "thiserror", "tokio", "tokio-util", @@ -3710,6 +3899,15 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.0", +] + [[package]] name = "hdrhistogram" version = "7.5.2" @@ -3738,14 +3936,8 @@ name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc", + "unicode-segmentation", ] [[package]] @@ -3760,13 +3952,22 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -3829,15 +4030,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "humantime" version = "2.1.0" @@ -3960,14 +4152,16 @@ dependencies = [ [[package]] name = "icelake" -version = "0.0.9" -source = "git+https://github.com/icelake-io/icelake?rev=a6790d17094754959e351fac1e11147e37643e97#a6790d17094754959e351fac1e11147e37643e97" +version = "0.0.10" +source = "git+https://github.com/icelake-io/icelake?rev=16dab0e36ab337e58ee8002d828def2d212fa116#16dab0e36ab337e58ee8002d828def2d212fa116" dependencies = [ "anyhow", "apache-avro 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", "arrow-arith", "arrow-array", 
"arrow-buffer", + "arrow-cast", + "arrow-ord", "arrow-row", "arrow-schema", "arrow-select", @@ -3975,21 +4169,28 @@ dependencies = [ "bitvec", "bytes", "chrono", + "csv", + "enum-display", "faster-hex", "futures", + "itertools 0.11.0", "log", + "murmur3", "once_cell", - "opendal", + "opendal 0.40.0", "ordered-float 3.9.1", "parquet", "regex", + "reqwest", "rust_decimal", "serde", "serde_bytes", "serde_json", - "serde_with 3.3.0", + "serde_with 3.4.0", "tokio", + "toml 0.7.8", "url", + "urlencoding", "uuid", ] @@ -4074,6 +4275,17 @@ dependencies = [ "str_stack", ] +[[package]] +name = "inherent" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce243b1bfa62ffc028f1cc3b6034ec63d649f3031bc8a4fbbb004e1ac17d1f68" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.37", +] + [[package]] name = "inquire" version = "0.6.2" @@ -4121,7 +4333,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", "windows-sys 0.48.0", ] @@ -4144,17 +4356,11 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "rustix 0.38.11", "windows-sys 0.48.0", ] -[[package]] -name = "iter-chunks" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7abddfc4e19bc38f3922e41b341fedb4e1470e922f024c4e5ae5922f56c7593" - [[package]] name = "itertools" version = "0.10.5" @@ -4181,9 +4387,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "ittapi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41e0d0b7b3b53d92a7e8b80ede3400112a6b8b4c98d1f5b8b16bb787c780582c" 
+checksum = "25a5c0b993601cad796222ea076565c5d9f337d35592f8622c753724f06d7271" dependencies = [ "anyhow", "ittapi-sys", @@ -4192,13 +4398,23 @@ dependencies = [ [[package]] name = "ittapi-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f8763c96e54e6d6a0dccc2990d8b5e33e3313aaeae6185921a3f4c1614a77c" +checksum = "cb7b5e473765060536a660eed127f758cf1a810c73e49063264959c60d1727d9" dependencies = [ "cc", ] +[[package]] +name = "java-locator" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90003f2fd9c52f212c21d8520f1128da0080bad6fff16b68fe6e7f2f0c3780c2" +dependencies = [ + "glob", + "lazy_static", +] + [[package]] name = "jni" version = "0.21.1" @@ -4208,7 +4424,9 @@ dependencies = [ "cesu8", "cfg-if", "combine", + "java-locator", "jni-sys", + "libloading", "log", "thiserror", "walkdir", @@ -4242,12 +4460,11 @@ dependencies = [ [[package]] name = "jsonschema-transpiler" version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40dc049cde84638e2a5657f45a4c74f471a4ecf2a8e2aa0b8ea899d5b1ebdee9" +source = "git+https://github.com/mozilla/jsonschema-transpiler?rev=c1a89d720d118843d8bcca51084deb0ed223e4b4#c1a89d720d118843d8bcca51084deb0ed223e4b4" dependencies = [ - "clap 2.32.0", - "env_logger 0.6.2", - "heck 0.3.3", + "clap", + "env_logger", + "heck 0.4.1", "log", "maplit", "regex", @@ -4261,7 +4478,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "pem 1.1.1", "ring", "serde", @@ -4278,6 +4495,15 @@ dependencies = [ "duct", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + 
"log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -4374,9 +4600,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libflate" @@ -4414,13 +4640,24 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "libsqlite3-sys" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libtest-mimic" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" dependencies = [ - "clap 4.4.2", + "clap", "termcolor", "threadpool", ] @@ -4472,7 +4709,7 @@ checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "local_stats_alloc" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "workspace-hack", ] @@ -4492,6 +4729,9 @@ name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +dependencies = [ + "value-bag", +] [[package]] name = "loom" @@ -4624,21 +4864,21 @@ dependencies = [ [[package]] name = "madsim-etcd-client" -version = "0.3.0+0.11.1" +version = "0.4.0+0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c26d21c8d69c25db9d461ab7dfa4b09bd982687546c8ca2c43d743533a8f1c3f" +checksum = "02b4b5de48bb7f3f7eae0bca62b3ed0b7d714b1b273d7347329b92c3a2eef113" dependencies = [ 
"etcd-client", "futures-util", "http", "madsim", "serde", - "serde_with 2.3.3", + "serde_with 3.4.0", "spin 0.9.8", "thiserror", "tokio", - "toml 0.7.8", - "tonic", + "toml 0.8.2", + "tonic 0.10.2", "tracing", ] @@ -4656,8 +4896,9 @@ dependencies = [ [[package]] name = "madsim-rdkafka" -version = "0.2.22" -source = "git+https://github.com/madsim-rs/madsim.git?rev=bb8f063#bb8f06384517ea3950b6c7a29a32c233058b89c7" +version = "0.3.0+0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00f9ab2d0545a55e4f209fc72c180a7e7b45a4e7baee7b4994c4628a877c5525" dependencies = [ "async-channel", "async-trait", @@ -4690,29 +4931,31 @@ dependencies = [ [[package]] name = "madsim-tonic" -version = "0.3.1+0.9.2" +version = "0.4.0+0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66177cce816367f8358a4dc482eabff8f979cf8a1d3288d3aa8dd822fb327c69" +checksum = "3b4d847e67d6f8319d7c5393121556e2a987f5b744967a0f9b84e502020239d3" dependencies = [ "async-stream", "chrono", "futures-util", "madsim", - "tonic", + "tokio", + "tonic 0.10.2", + "tower", "tracing", ] [[package]] name = "madsim-tonic-build" -version = "0.3.1+0.9.2" +version = "0.4.2+0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f6b2947243e5ae6a37c7992da07cf3ed60ebeb6a3d2c1e95574a2a2697b0c0" +checksum = "4a2ad2776ba20221ccbe4e136e2fa0f7ab90eebd608373177f3e74a198a288ec" dependencies = [ - "prettyplease", + "prettyplease 0.2.15", "proc-macro2", - "prost-build", + "prost-build 0.12.1", "quote", - "syn 1.0.109", + "syn 2.0.37", "tonic-build", ] @@ -4761,7 +5004,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -4790,11 +5033,11 @@ dependencies = [ [[package]] name = "memfd" -version = "0.6.3" +version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" +checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.37.23", + "rustix 0.38.11", ] [[package]] @@ -4894,14 +5137,23 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "model_migration" +version = "0.1.0" +dependencies = [ + "async-std", + "sea-orm-migration", + "uuid", +] + [[package]] name = "moka" -version = "0.11.3" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6e72583bf6830c956235bff0d5afec8cf2952f579ebad18ae7821a917d950f" +checksum = "8dc65d4615c08c8a13d91fd404b5a2a4485ba35b4091e3315cf8798d280c2f29" dependencies = [ - "async-io", "async-lock", + "async-trait", "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", @@ -4910,7 +5162,6 @@ dependencies = [ "parking_lot 0.12.1", "quanta", "rustc_version", - "scheduled-thread-pool", "skeptic", "smallvec", "tagptr", @@ -4930,10 +5181,22 @@ name = "multimap" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "multimap" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70db9248a93dc36a36d9a47898caa007a32755c7ad140ec64eeeb50d5a730631" dependencies = [ "serde", ] +[[package]] +name = "murmur3" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9252111cf132ba0929b6f8e030cac2a24b507f3a4d6db6fb2896f27b354c714b" + [[package]] name = "mysql-common-derive" version = "0.30.2" @@ -4947,7 +5210,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", "termcolor", "thiserror", ] @@ -4991,7 +5254,7 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bigdecimal", "bindgen", "bitflags 2.4.0", @@ -5016,7 +5279,7 @@ dependencies = [ "serde", "serde_json", "sha1", - "sha2 0.10.7", + "sha2", "smallvec", "subprocess", "thiserror", @@ -5092,9 +5355,9 @@ dependencies = [ [[package]] name = "nkeys" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9261eb915c785ea65708bc45ef43507ea46914e1a73f1412d1a38aba967c8e" +checksum = "aad178aad32087b19042ee36dfd450b73f5f934fbfb058b59b198684dfec4c47" dependencies = [ "byteorder", "data-encoding", @@ -5155,11 +5418,10 @@ dependencies = [ [[package]] name = "nuid" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c1bb65186718d348306bf1afdeb20d9ab45b2ab80fb793c0fdcf59ffbb4f38" +checksum = "fc895af95856f929163a0aa20c26a78d26bfdc839f51b9d5aa7a5b79e52b7e83" dependencies = [ - "lazy_static", "rand", ] @@ -5273,7 +5535,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", ] @@ -5328,7 +5590,7 @@ dependencies = [ "serde", "serde_json", "serde_path_to_error", - "sha2 0.10.7", + "sha2", "thiserror", "url", ] @@ -5367,22 +5629,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] -name = "opaque-debug" -version = "0.3.0" +name = "opendal" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "1ad95e460e5976ab1b74f398ab856c59f8417b3dd32202329e3491dcbe3a6b84" +dependencies = [ + "anyhow", + "async-compat", + "async-trait", + "backon", + "base64 
0.21.4", + "bytes", + "chrono", + "flagset", + "futures", + "http", + "hyper", + "log", + "md-5", + "once_cell", + "parking_lot 0.12.1", + "percent-encoding", + "pin-project", + "quick-xml 0.29.0", + "reqsign", + "reqwest", + "serde", + "serde_json", + "sha2", + "tokio", + "uuid", +] [[package]] name = "opendal" -version = "0.39.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad95e460e5976ab1b74f398ab856c59f8417b3dd32202329e3491dcbe3a6b84" +checksum = "ddba7299bab261d3ae2f37617fb7f45b19ed872752bb4e22cf93a69d979366c5" dependencies = [ "anyhow", "async-compat", "async-trait", "backon", - "base64 0.21.3", + "base64 0.21.4", "bytes", "chrono", "flagset", @@ -5395,12 +5684,13 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "pin-project", + "prometheus", "quick-xml 0.29.0", "reqsign", "reqwest", "serde", "serde_json", - "sha2 0.10.7", + "sha2", "tokio", "uuid", ] @@ -5455,7 +5745,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -5509,10 +5799,10 @@ dependencies = [ "opentelemetry-semantic-conventions", "opentelemetry_api", "opentelemetry_sdk", - "prost", + "prost 0.11.9", "thiserror", "tokio", - "tonic", + "tonic 0.9.2", ] [[package]] @@ -5523,8 +5813,8 @@ checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" dependencies = [ "opentelemetry_api", "opentelemetry_sdk", - "prost", - "tonic", + "prost 0.11.9", + "tonic 0.9.2", ] [[package]] @@ -5596,8 +5886,7 @@ dependencies = [ [[package]] name = "ordered-multimap" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +source = "git+https://github.com/risingwavelabs/ordered-multimap-rs.git?rev=19c743f#19c743f3e3d106c99ba37628f06a2ca6faa2284f" dependencies = [ "dlv-list", "hashbrown 0.13.2", @@ -5613,6 
+5902,30 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ouroboros" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2ba07320d39dfea882faa70554b4bd342a5f273ed59ba7c1c6b4c840492c954" +dependencies = [ + "aliasable", + "ouroboros_macro", + "static_assertions", +] + +[[package]] +name = "ouroboros_macro" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec4c6225c69b4ca778c0aea097321a64c421cf4577b331c61b229267edabb6f8" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.37", +] + [[package]] name = "outref" version = "0.5.1" @@ -5696,9 +6009,9 @@ dependencies = [ [[package]] name = "parquet" -version = "46.0.0" +version = "47.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad2cba786ae07da4d73371a88b9e0f9d3ffac1a9badc83922e0e15814f5c5fa" +checksum = "0463cc3b256d5f50408c49a4be3a16674f4c8ceef60941709620a062b1f6bf4d" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -5708,7 +6021,7 @@ dependencies = [ "arrow-ipc", "arrow-schema", "arrow-select", - "base64 0.21.3", + "base64 0.21.4", "brotli", "bytes", "chrono", @@ -5750,7 +6063,7 @@ dependencies = [ "regex", "regex-syntax 0.7.5", "structmeta", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -5788,11 +6101,11 @@ dependencies = [ [[package]] name = "pbjson" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048f9ac93c1eab514f9470c4bc8d97ca2a0a236b84f45cc19d69a59fc11467f6" +checksum = "1030c719b0ec2a2d25a5df729d6cff1acf3cc230bf766f4f97833591f7577b90" dependencies = [ - "base64 0.13.1", + "base64 0.21.4", "serde", ] @@ -5804,8 +6117,8 @@ checksum = "bdbb7b706f2afc610f3853550cdbbf6372fd324824a087806bd4480ea4996e24" dependencies = [ "heck 0.4.1", "itertools 0.10.5", - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", ] [[package]] @@ -5829,19 +6142,10 
@@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "serde", ] -[[package]] -name = "pem-rfc7468" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f22eb0e3c593294a99e9ff4b24cf6b752d43f193aa4415fe5077c159996d497" -dependencies = [ - "base64ct", -] - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -5880,7 +6184,7 @@ dependencies = [ [[package]] name = "pgwire" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "byteorder", @@ -5955,7 +6259,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -5976,21 +6280,9 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der 0.7.8", - "pkcs8 0.10.2", - "spki 0.7.2", -] - -[[package]] -name = "pkcs8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" -dependencies = [ - "der 0.4.5", - "pem-rfc7468 0.2.3", - "spki 0.4.1", - "zeroize", + "der", + "pkcs8", + "spki", ] [[package]] @@ -5999,8 +6291,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", - "spki 0.7.2", + "der", + "spki", ] [[package]] @@ -6009,6 +6301,12 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "platforms" +version = "3.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" + [[package]] name = "plotters" version = "0.3.5" @@ -6082,7 +6380,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -6091,7 +6389,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "byteorder", "bytes", "fallible-iterator", @@ -6099,7 +6397,7 @@ dependencies = [ "md-5", "memchr", "rand", - "sha2 0.10.7", + "sha2", "stringprep", ] @@ -6119,11 +6417,17 @@ dependencies = [ ] [[package]] -name = "pprof" -version = "0.12.1" +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978385d59daf9269189d052ca8a84c1acfd0715c0599a5d5188d4acc078ca46a" -dependencies = [ +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "pprof" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +dependencies = [ "backtrace", "cfg-if", "findshlibs", @@ -6175,6 +6479,12 @@ dependencies = [ "termtree", ] +[[package]] +name = "prehash" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04bfa62906ce8d9badf8d1764501640ae7f0bcea3437a209315830e0f73564d1" + [[package]] name = "prepare_ci_pubsub" version = "0.1.0" @@ -6211,6 +6521,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prettyplease" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +dependencies = [ + "proc-macro2", + "syn 2.0.37", +] + [[package]] name = "priority-queue" version = "1.3.2" @@ 
-6237,7 +6557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] @@ -6272,9 +6592,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -6289,7 +6609,7 @@ dependencies = [ "byteorder", "hex", "lazy_static", - "rustix 0.36.15", + "rustix 0.36.16", ] [[package]] @@ -6302,7 +6622,7 @@ dependencies = [ "byteorder", "hex", "lazy_static", - "rustix 0.36.15", + "rustix 0.36.16", ] [[package]] @@ -6324,13 +6644,13 @@ dependencies = [ [[package]] name = "prometheus-http-query" -version = "0.6.6" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7970fd6e91b5cb87e9a093657572a896d133879ced7752d2c7635beae29eaba0" +checksum = "8e7c6186f0b66203811641c88ca4e5817182caa7553868359bafa5b17d97f37f" dependencies = [ + "mime", "reqwest", "serde", - "serde_json", "time", "url", ] @@ -6342,7 +6662,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +dependencies = [ + "bytes", + "prost-derive 0.12.1", ] [[package]] @@ -6356,17 +6686,39 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "log", - "multimap", + "multimap 0.8.3", "petgraph", - 
"prettyplease", - "prost", - "prost-types", + "prettyplease 0.1.25", + "prost 0.11.9", + "prost-types 0.11.9", "regex", "syn 1.0.109", "tempfile", "which", ] +[[package]] +name = "prost-build" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" +dependencies = [ + "bytes", + "heck 0.4.1", + "itertools 0.11.0", + "log", + "multimap 0.8.3", + "once_cell", + "petgraph", + "prettyplease 0.2.15", + "prost 0.12.1", + "prost-types 0.12.1", + "regex", + "syn 2.0.37", + "tempfile", + "which", +] + [[package]] name = "prost-derive" version = "0.11.9" @@ -6380,25 +6732,37 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prost-derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +dependencies = [ + "anyhow", + "itertools 0.11.0", + "proc-macro2", + "quote", + "syn 2.0.37", +] + [[package]] name = "prost-helpers" version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", - "workspace-hack", + "syn 2.0.37", ] [[package]] name = "prost-reflect" -version = "0.11.5" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b823de344848e011658ac981009100818b322421676740546f8b52ed5249428" +checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ "once_cell", - "prost", - "prost-types", + "prost 0.12.1", + "prost-types 0.12.1", ] [[package]] @@ -6407,7 +6771,16 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "prost", + "prost 0.11.9", +] + +[[package]] +name = "prost-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +dependencies = [ + "prost 0.12.1", ] [[package]] @@ -6511,9 +6884,9 @@ dependencies = [ "oauth2", "openidconnect", "pem 1.1.1", - "prost", - "prost-build", - "prost-derive", + "prost 0.11.9", + "prost-build 0.11.9", + "prost-derive 0.11.9", "rand", "regex", "serde", @@ -6595,7 +6968,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -6605,15 +6978,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - [[package]] name = "rand_core" version = "0.6.4" @@ -6629,7 +6996,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -6665,8 +7032,9 @@ dependencies = [ [[package]] name = "rdkafka-sys" -version = "4.3.0+1.9.2" -source = "git+https://github.com/MaterializeInc/rust-rdkafka?rev=8ea07c4#8ea07c4d2b96636ff093e670bc921892aee0d56a" +version = "4.6.0+2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad63c279fca41a27c231c450a2d2ad18288032e9cbb159ad16c9d96eba35aaaf" dependencies = [ "cmake", "libc", @@ -6684,12 +7052,19 @@ version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f49cdc0bb3f412bf8e7d1bd90fe1d9eb10bc5c399ba90973c14662a27b3f8ba" dependencies = [ + "async-std", + "async-trait", + "bytes", "combine", + "futures-util", "itoa", "percent-encoding", + "pin-project-lite", 
"ryu", "sha1_smol", "socket2 0.4.9", + "tokio", + "tokio-util", "url", ] @@ -6724,9 +7099,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4dcbd3a2ae7fb94b5813fa0e957c6ab51bf5d0a8ee1b69e0c2d0f1e6eb8485" +checksum = "ad156d539c879b7a24a363a2016d77961786e71f48f2e2fc8302a92abd2429a6" dependencies = [ "hashbrown 0.13.2", "log", @@ -6737,14 +7112,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.1", + "regex-syntax 0.8.0", ] [[package]] @@ -6758,13 +7133,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.0", ] [[package]] @@ -6779,6 +7154,12 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "regex-syntax" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" + [[package]] name = "rend" version = "0.4.0" @@ -6796,7 +7177,7 @@ checksum = "3228e570df74d69d3d3236a71371f1edd748a3e4eb728ea1f29d403bc10fc727" dependencies = [ "anyhow", "async-trait", - "base64 0.21.3", + "base64 0.21.4", "chrono", 
"form_urlencoded", "hex", @@ -6815,7 +7196,7 @@ dependencies = [ "serde", "serde_json", "sha1", - "sha2 0.10.7", + "sha2", "tokio", ] @@ -6825,7 +7206,7 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -6890,11 +7271,11 @@ dependencies = [ [[package]] name = "risedev" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "chrono", - "clap 4.4.2", + "clap", "console", "fs-err", "glob", @@ -6908,7 +7289,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "serde_with 3.3.0", + "serde_with 3.4.0", "serde_yaml", "tempfile", "tracing", @@ -6919,10 +7300,10 @@ dependencies = [ [[package]] name = "risedev-config" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", - "clap 4.4.2", + "clap", "console", "dialoguer", "enum-iterator", @@ -6932,14 +7313,14 @@ dependencies = [ [[package]] name = "risingwave_backup" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "async-trait", "bytes", "itertools 0.11.0", "parking_lot 0.12.1", - "prost", + "prost 0.12.1", "risingwave_common", "risingwave_hummock_sdk", "risingwave_object_store", @@ -6950,26 +7331,13 @@ dependencies = [ "twox-hash", ] -[[package]] -name = "risingwave_backup_cmd" -version = "1.1.0-alpha" -dependencies = [ - "clap 4.4.2", - "madsim-tokio", - "prometheus", - "risingwave_backup", - "risingwave_meta", - "risingwave_rt", -] - [[package]] name = "risingwave_batch" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "assert_matches", "async-recursion", - "async-stream", "async-trait", "criterion", "either", @@ -6988,6 +7356,7 @@ dependencies = [ "risingwave_common", "risingwave_connector", "risingwave_expr", + "risingwave_expr_impl", "risingwave_hummock_sdk", "risingwave_pb", "risingwave_rpc_client", @@ 
-7002,13 +7371,12 @@ dependencies = [ "tokio-metrics", "tokio-stream", "tracing", - "uuid", "workspace-hack", ] [[package]] name = "risingwave_bench" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "async-trait", "aws-config", @@ -7017,7 +7385,7 @@ dependencies = [ "bcc", "bytes", "bytesize", - "clap 4.4.2", + "clap", "futures", "hdrhistogram", "itertools 0.11.0", @@ -7033,7 +7401,7 @@ dependencies = [ "risingwave_storage", "serde", "tokio-stream", - "toml 0.7.8", + "toml 0.8.2", "tracing", "tracing-subscriber", "workspace-hack", @@ -7041,17 +7409,18 @@ dependencies = [ [[package]] name = "risingwave_cmd" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ - "clap 4.4.2", + "clap", "madsim-tokio", "prometheus", "risingwave_common", "risingwave_compactor", "risingwave_compute", "risingwave_ctl", + "risingwave_expr_impl", "risingwave_frontend", - "risingwave_meta", + "risingwave_meta_node", "risingwave_rt", "task_stats_alloc", "tikv-jemallocator", @@ -7061,10 +7430,10 @@ dependencies = [ [[package]] name = "risingwave_cmd_all" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", - "clap 4.4.2", + "clap", "console", "const-str", "expect-test", @@ -7075,8 +7444,9 @@ dependencies = [ "risingwave_compactor", "risingwave_compute", "risingwave_ctl", + "risingwave_expr_impl", "risingwave_frontend", - "risingwave_meta", + "risingwave_meta_node", "risingwave_rt", "shell-words", "strum 0.25.0", @@ -7092,21 +7462,23 @@ dependencies = [ [[package]] name = "risingwave_common" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "arc-swap", "arrow-array", "arrow-buffer", + "arrow-cast", "arrow-schema", "async-trait", "auto_enums", + "auto_impl", "bitflags 2.4.0", "byteorder", "bytes", "chrono", "chrono-tz", - "clap 4.4.2", + "clap", "comfy-table", "crc32fast", "criterion", @@ -7122,7 +7494,7 @@ dependencies = [ "hex", "http", "http-body", - "humantime 2.1.0", + "humantime", "hyper", "hytra", 
"itertools 0.11.0", @@ -7143,10 +7515,11 @@ dependencies = [ "paste", "pin-project-lite", "postgres-types", + "prehash", "pretty_assertions", "procfs 0.15.1", "prometheus", - "prost", + "prost 0.12.1", "rand", "regex", "reqwest", @@ -7159,7 +7532,7 @@ dependencies = [ "serde_bytes", "serde_default", "serde_json", - "serde_with 3.3.0", + "serde_with 3.4.0", "smallbitset", "speedate", "static_assertions", @@ -7169,7 +7542,7 @@ dependencies = [ "tempfile", "thiserror", "tinyvec", - "toml 0.7.8", + "toml 0.8.2", "tower-layer", "tower-service", "tracing", @@ -7181,21 +7554,33 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "risingwave_common_heap_profiling" +version = "1.3.0-alpha" +dependencies = [ + "anyhow", + "chrono", + "madsim-tokio", + "parking_lot 0.12.1", + "risingwave_common", + "tikv-jemalloc-ctl", + "tracing", +] + [[package]] name = "risingwave_common_proc_macro" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "bae", "proc-macro-error", "proc-macro2", "quote", "syn 1.0.109", - "workspace-hack", ] [[package]] name = "risingwave_common_service" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "async-trait", "futures", @@ -7214,12 +7599,12 @@ dependencies = [ [[package]] name = "risingwave_compaction_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "async-trait", "bytes", - "clap 4.4.2", + "clap", "futures", "madsim-tokio", "prometheus", @@ -7229,6 +7614,7 @@ dependencies = [ "risingwave_hummock_sdk", "risingwave_hummock_test", "risingwave_meta", + "risingwave_meta_node", "risingwave_object_store", "risingwave_pb", "risingwave_rpc_client", @@ -7240,37 +7626,35 @@ dependencies = [ [[package]] name = "risingwave_compactor" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ - "anyhow", "async-trait", "await-tree", - "clap 4.4.2", + "clap", "madsim-tokio", "madsim-tonic", "parking_lot 0.12.1", - "prometheus", "risingwave_common", + 
"risingwave_common_heap_profiling", "risingwave_common_service", "risingwave_object_store", "risingwave_pb", "risingwave_rpc_client", "risingwave_storage", "serde", - "serde_json", "tracing", "workspace-hack", ] [[package]] name = "risingwave_compute" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "async-trait", "await-tree", "chrono", - "clap 4.4.2", + "clap", "either", "futures", "futures-async-stream", @@ -7284,9 +7668,11 @@ dependencies = [ "rand", "risingwave_batch", "risingwave_common", + "risingwave_common_heap_profiling", "risingwave_common_service", "risingwave_connector", "risingwave_hummock_sdk", + "risingwave_jni_core", "risingwave_pb", "risingwave_rpc_client", "risingwave_source", @@ -7304,7 +7690,7 @@ dependencies = [ [[package]] name = "risingwave_connector" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "apache-avro 0.15.0 (git+https://github.com/risingwavelabs/avro?branch=idx0dev/resolved_schema)", @@ -7320,8 +7706,7 @@ dependencies = [ "aws-sdk-s3", "aws-smithy-http", "aws-types", - "base64 0.21.3", - "bincode 1.3.3", + "base64 0.21.4", "byteorder", "bytes", "chrono", @@ -7329,52 +7714,63 @@ dependencies = [ "criterion", "csv", "duration-str", + "easy-ext", "enum-as-inner", "futures", "futures-async-stream", "glob", "google-cloud-pubsub", + "http", "hyper", "hyper-tls", "icelake", + "indexmap 1.9.3", "itertools 0.11.0", + "jni", "jsonschema-transpiler", "madsim-rdkafka", "madsim-tokio", - "madsim-tonic", "maplit", "moka", "mysql_async", "mysql_common", "nexmark", "num-bigint", - "opendal", "parking_lot 0.12.1", "paste", "prometheus", - "prost", - "prost-build", + "prost 0.12.1", + "prost-build 0.12.1", "prost-reflect", - "prost-types", + "prost-types 0.12.1", "protobuf-native", "pulsar", "rand", + "redis", + "regex", "reqwest", "risingwave_common", + "risingwave_jni_core", "risingwave_pb", "risingwave_rpc_client", "rust_decimal", "serde", "serde_derive", "serde_json", - "serde_with 
3.3.0", + "serde_with 3.4.0", "simd-json", + "strum 0.25.0", + "strum_macros 0.25.2", "tempfile", "thiserror", + "time", "tokio-retry", "tokio-stream", "tokio-util", + "tonic 0.9.2", "tracing", + "tracing-futures", + "tracing-test", "url", "urlencoding", "workspace-hack", @@ -7382,12 +7778,12 @@ dependencies = [ [[package]] name = "risingwave_ctl" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "bytes", "chrono", - "clap 4.4.2", + "clap", "comfy-table", "futures", "inquire", @@ -7416,11 +7812,11 @@ dependencies = [ [[package]] name = "risingwave_e2e_extended_mode_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "chrono", - "clap 4.4.2", + "clap", "madsim-tokio", "pg_interval", "rust_decimal", @@ -7431,47 +7827,68 @@ dependencies = [ [[package]] name = "risingwave_expr" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ - "aho-corasick", "anyhow", "arrow-array", "arrow-schema", "async-trait", - "auto_enums", + "auto_impl", "await-tree", - "base64 0.21.3", + "cfg-or-panic", "chrono", - "chrono-tz", - "criterion", "ctor", "downcast-rs", "easy-ext", "either", "expect-test", - "futures", "futures-async-stream", "futures-util", - "hex", "itertools 0.11.0", "madsim-tokio", - "md5", "num-traits", "parse-display", "paste", - "regex", "risingwave_common", "risingwave_expr_macro", "risingwave_pb", "risingwave_udf", + "smallvec", + "static_assertions", + "thiserror", + "tracing", + "workspace-hack", +] + +[[package]] +name = "risingwave_expr_impl" +version = "1.3.0-alpha" +dependencies = [ + "aho-corasick", + "anyhow", + "async-trait", + "auto_enums", + "chrono", + "criterion", + "expect-test", + "fancy-regex", + "futures-async-stream", + "futures-util", + "hex", + "itertools 0.11.0", + "madsim-tokio", + "md5", + "num-traits", + "regex", + "risingwave_common", + "risingwave_expr", + "risingwave_pb", "rust_decimal", "self_cell", "serde", "serde_json", "sha1", - "sha2 0.10.7", - "smallvec", - 
"static_assertions", + "sha2", "thiserror", "tracing", "workspace-hack", @@ -7484,12 +7901,12 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "risingwave_frontend" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "arc-swap", @@ -7498,10 +7915,10 @@ dependencies = [ "async-recursion", "async-trait", "auto_enums", - "base64 0.21.3", + "base64 0.21.4", "bk-tree", "bytes", - "clap 4.4.2", + "clap", "downcast-rs", "dyn-clone", "easy-ext", @@ -7516,7 +7933,6 @@ dependencies = [ "madsim-tonic", "maplit", "md5", - "more-asserts", "num-integer", "parking_lot 0.12.1", "parse-display", @@ -7534,6 +7950,7 @@ dependencies = [ "risingwave_common_service", "risingwave_connector", "risingwave_expr", + "risingwave_expr_impl", "risingwave_pb", "risingwave_rpc_client", "risingwave_source", @@ -7543,7 +7960,7 @@ dependencies = [ "risingwave_variables", "serde", "serde_json", - "sha2 0.10.7", + "sha2", "smallvec", "tempfile", "thiserror", @@ -7556,7 +7973,7 @@ dependencies = [ [[package]] name = "risingwave_hummock_sdk" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "bytes", "hex", @@ -7570,11 +7987,11 @@ dependencies = [ [[package]] name = "risingwave_hummock_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "async-trait", "bytes", - "clap 4.4.2", + "clap", "criterion", "expect-test", "fail", @@ -7602,7 +8019,7 @@ dependencies = [ [[package]] name = "risingwave_hummock_trace" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "async-trait", "bincode 2.0.0-rc.3", @@ -7614,7 +8031,7 @@ dependencies = [ "madsim-tokio", "mockall", "parking_lot 0.12.1", - "prost", + "prost 0.12.1", "risingwave_common", "risingwave_hummock_sdk", "risingwave_pb", @@ -7626,7 +8043,8 @@ dependencies = [ name = "risingwave_java_binding" version = "0.1.0" dependencies = [ - "prost", + "jni", + "prost 0.12.1", "risingwave_common", "risingwave_expr", 
"risingwave_jni_core", @@ -7639,12 +8057,15 @@ dependencies = [ name = "risingwave_jni_core" version = "0.1.0" dependencies = [ + "anyhow", "bytes", + "cfg-or-panic", "futures", "itertools 0.11.0", "jni", "madsim-tokio", - "prost", + "paste", + "prost 0.12.1", "risingwave_common", "risingwave_expr", "risingwave_hummock_sdk", @@ -7659,7 +8080,7 @@ dependencies = [ [[package]] name = "risingwave_meta" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "arc-swap", @@ -7668,8 +8089,9 @@ dependencies = [ "aws-config", "aws-sdk-ec2", "axum", + "base64-url", "bytes", - "clap 4.4.2", + "clap", "crepe", "easy-ext", "either", @@ -7686,18 +8108,18 @@ dependencies = [ "maplit", "memcomparable", "mime_guess", + "model_migration", "num-integer", "num-traits", "parking_lot 0.12.1", "prometheus", "prometheus-http-query", - "prost", + "prost 0.12.1", "rand", - "regex", "reqwest", "risingwave_backup", "risingwave_common", - "risingwave_common_service", + "risingwave_common_heap_profiling", "risingwave_connector", "risingwave_hummock_sdk", "risingwave_object_store", @@ -7706,11 +8128,10 @@ dependencies = [ "risingwave_sqlparser", "risingwave_test_runner", "scopeguard", + "sea-orm", "serde", "serde_json", - "static_assertions", "sync-point", - "tempfile", "thiserror", "tokio-retry", "tokio-stream", @@ -7722,9 +8143,59 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "risingwave_meta_node" +version = "1.3.0-alpha" +dependencies = [ + "anyhow", + "clap", + "either", + "futures", + "itertools 0.11.0", + "madsim-etcd-client", + "madsim-tokio", + "madsim-tonic", + "model_migration", + "prometheus-http-query", + "regex", + "risingwave_common", + "risingwave_common_heap_profiling", + "risingwave_common_service", + "risingwave_meta", + "risingwave_meta_service", + "risingwave_pb", + "risingwave_rpc_client", + "sea-orm", + "tracing", + "workspace-hack", +] + +[[package]] +name = "risingwave_meta_service" +version = "1.3.0-alpha" +dependencies = [ + 
"anyhow", + "async-trait", + "either", + "futures", + "itertools 0.11.0", + "madsim-tokio", + "madsim-tonic", + "regex", + "risingwave_common", + "risingwave_connector", + "risingwave_meta", + "risingwave_pb", + "sea-orm", + "sync-point", + "tokio-stream", + "tracing", + "workspace-hack", +] + [[package]] name = "risingwave_object_store" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "async-trait", "await-tree", @@ -7741,7 +8212,7 @@ dependencies = [ "itertools 0.11.0", "madsim-aws-sdk-s3", "madsim-tokio", - "opendal", + "opendal 0.39.0", "prometheus", "risingwave_common", "spin 0.9.8", @@ -7752,7 +8223,7 @@ dependencies = [ [[package]] name = "risingwave_pb" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "enum-as-inner", "fs-err", @@ -7760,16 +8231,17 @@ dependencies = [ "madsim-tonic-build", "pbjson", "pbjson-build", - "prost", + "prost 0.12.1", "prost-helpers", "serde", + "strum 0.25.0", "walkdir", "workspace-hack", ] [[package]] name = "risingwave_planner_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "expect-test", @@ -7777,10 +8249,11 @@ dependencies = [ "libtest-mimic", "madsim-tokio", "paste", + "risingwave_expr_impl", "risingwave_frontend", "risingwave_sqlparser", "serde", - "serde_with 3.3.0", + "serde_with 3.4.0", "serde_yaml", "tempfile", "walkdir", @@ -7789,10 +8262,10 @@ dependencies = [ [[package]] name = "risingwave_regress_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", - "clap 4.4.2", + "clap", "madsim-tokio", "path-absolutize", "similar", @@ -7803,7 +8276,7 @@ dependencies = [ [[package]] name = "risingwave_rpc_client" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "async-trait", @@ -7832,10 +8305,9 @@ dependencies = [ [[package]] name = "risingwave_rt" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "await-tree", - "chrono", "console", "console-subscriber", "either", @@ -7847,9 +8319,9 @@ 
dependencies = [ "opentelemetry-semantic-conventions", "parking_lot 0.12.1", "pprof", - "prometheus", "risingwave_common", "risingwave_variables", + "rlimit", "time", "tracing", "tracing-opentelemetry", @@ -7864,7 +8336,7 @@ dependencies = [ "anyhow", "async-trait", "cfg-or-panic", - "clap 4.4.2", + "clap", "console", "futures", "glob", @@ -7886,8 +8358,9 @@ dependencies = [ "risingwave_connector", "risingwave_ctl", "risingwave_e2e_extended_mode_test", + "risingwave_expr_impl", "risingwave_frontend", - "risingwave_meta", + "risingwave_meta_node", "risingwave_pb", "risingwave_rpc_client", "risingwave_sqlparser", @@ -7899,18 +8372,18 @@ dependencies = [ "tempfile", "tikv-jemallocator", "tokio-postgres", + "tokio-stream", "tracing", "tracing-subscriber", ] [[package]] name = "risingwave_source" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "assert_matches", "criterion", - "easy-ext", "futures", "futures-async-stream", "itertools 0.11.0", @@ -7928,7 +8401,7 @@ dependencies = [ [[package]] name = "risingwave_sqlparser" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "itertools 0.11.0", "matches", @@ -7947,7 +8420,7 @@ dependencies = [ "madsim-tokio", "risingwave_sqlparser", "serde", - "serde_with 3.3.0", + "serde_with 3.4.0", "serde_yaml", "walkdir", "workspace-hack", @@ -7955,11 +8428,11 @@ dependencies = [ [[package]] name = "risingwave_sqlsmith" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "chrono", - "clap 4.4.2", + "clap", "expect-test", "itertools 0.11.0", "libtest-mimic", @@ -7969,6 +8442,7 @@ dependencies = [ "regex", "risingwave_common", "risingwave_expr", + "risingwave_expr_impl", "risingwave_frontend", "risingwave_pb", "risingwave_sqlparser", @@ -7981,29 +8455,28 @@ dependencies = [ [[package]] name = "risingwave_state_cleaning_test" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", - "clap 4.4.2", + "clap", "futures", "madsim-tokio", "prometheus", 
"regex", "risingwave_rt", "serde", - "serde_with 3.3.0", + "serde_with 3.4.0", "tokio-postgres", "tokio-stream", - "toml 0.7.8", + "toml 0.8.2", "tracing", "workspace-hack", ] [[package]] name = "risingwave_storage" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ - "anyhow", "arc-swap", "async-trait", "auto_enums", @@ -8027,6 +8500,7 @@ dependencies = [ "lz4", "mach2", "madsim-tokio", + "madsim-tonic", "memcomparable", "moka", "more-asserts", @@ -8035,7 +8509,7 @@ dependencies = [ "parking_lot 0.12.1", "procfs 0.15.1", "prometheus", - "prost", + "prost 0.12.1", "rand", "risingwave_backup", "risingwave_common", @@ -8060,12 +8534,12 @@ dependencies = [ "workspace-hack", "xorf", "xxhash-rust", - "zstd 0.12.4", + "zstd 0.13.0", ] [[package]] name = "risingwave_stream" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "anyhow", "assert_matches", @@ -8075,7 +8549,6 @@ dependencies = [ "await-tree", "bytes", "criterion", - "dyn-clone", "educe", "either", "enum-as-inner", @@ -8084,7 +8557,6 @@ dependencies = [ "futures-async-stream", "governor", "hytra", - "iter-chunks", "itertools 0.11.0", "local_stats_alloc", "lru 0.7.6", @@ -8092,18 +8564,16 @@ dependencies = [ "madsim-tonic", "maplit", "memcomparable", - "multimap", - "num-traits", + "multimap 0.9.0", "parking_lot 0.12.1", - "parse-display", "pin-project", "prometheus", - "prost", + "prost 0.12.1", "rand", "risingwave_common", "risingwave_connector", "risingwave_expr", - "risingwave_frontend", + "risingwave_expr_impl", "risingwave_hummock_sdk", "risingwave_hummock_test", "risingwave_pb", @@ -8114,7 +8584,6 @@ dependencies = [ "serde_json", "serde_yaml", "smallvec", - "spin 0.9.8", "static_assertions", "task_stats_alloc", "thiserror", @@ -8127,7 +8596,7 @@ dependencies = [ [[package]] name = "risingwave_test_runner" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "fail", "sync-point", @@ -8143,8 +8612,9 @@ dependencies = [ "arrow-ipc", "arrow-schema", 
"arrow-select", - "base64 0.21.3", + "base64 0.21.4", "bytes", + "cfg-or-panic", "futures-util", "itertools 0.11.0", "madsim-tokio", @@ -8159,7 +8629,7 @@ dependencies = [ [[package]] name = "risingwave_variables" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "chrono", "workspace-hack", @@ -8199,6 +8669,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" +[[package]] +name = "rlimit" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +dependencies = [ + "libc", +] + [[package]] name = "rsa" version = "0.9.2" @@ -8206,17 +8685,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" dependencies = [ "byteorder", - "const-oid 0.9.5", - "digest 0.10.7", + "const-oid", + "digest", "num-bigint-dig", "num-integer", "num-iter", "num-traits", "pkcs1", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "signature 2.1.0", - "spki 0.7.2", + "pkcs8", + "rand_core", + "signature", + "spki", "subtle", "zeroize", ] @@ -8271,9 +8750,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.15" +version = "0.36.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" +checksum = "6da3636faa25820d8648e0e31c5d519bbb01f72fdf57131f0f5f7da5fed36eab" dependencies = [ "bitflags 1.3.2", "errno", @@ -8354,7 +8833,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", ] [[package]] @@ -8438,15 +8917,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "scheduled-thread-pool" -version = 
"0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" -dependencies = [ - "parking_lot 0.12.1", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -8475,6 +8945,165 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sea-bae" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bd3534a9978d0aa7edd2808dc1f8f31c4d0ecd31ddf71d997b3c98e9f3c9114" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "sea-orm" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61f6c7daef05dde3476d97001e11fca7a52b655aa3bf4fd610ab2da1176a2ed5" +dependencies = [ + "async-stream", + "async-trait", + "bigdecimal", + "chrono", + "futures", + "log", + "ouroboros", + "rust_decimal", + "sea-orm-macros", + "sea-query", + "sea-query-binder", + "serde", + "serde_json", + "sqlx", + "strum 0.25.0", + "thiserror", + "time", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sea-orm-cli" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e3f0ff2fa5672e2e7314d107c6498a18e469beeb340a0ed84e3075fce73c2cd" +dependencies = [ + "chrono", + "clap", + "dotenvy", + "glob", + "regex", + "sea-schema", + "tracing", + "tracing-subscriber", + "url", +] + +[[package]] +name = "sea-orm-macros" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd90e73d5f5b184bad525767da29fbfec132b4e62ebd6f60d2f2737ec6468f62" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "sea-bae", + "syn 2.0.37", + "unicode-ident", +] + +[[package]] +name = "sea-orm-migration" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21f673fcefb3a7e7b89a12b6c0e854ec0be14367635ac3435369c8ad7f11e09e" +dependencies = [ + 
"async-trait", + "clap", + "dotenvy", + "futures", + "sea-orm", + "sea-orm-cli", + "sea-schema", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "sea-query" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28c05a5bf6403834be253489bbe95fa9b1e5486bc843b61f60d26b5c9c1e244b" +dependencies = [ + "bigdecimal", + "chrono", + "derivative", + "inherent", + "ordered-float 3.9.1", + "rust_decimal", + "sea-query-derive", + "serde_json", + "time", + "uuid", +] + +[[package]] +name = "sea-query-binder" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36bbb68df92e820e4d5aeb17b4acd5cc8b5d18b2c36a4dd6f4626aabfa7ab1b9" +dependencies = [ + "bigdecimal", + "chrono", + "rust_decimal", + "sea-query", + "serde_json", + "sqlx", + "time", + "uuid", +] + +[[package]] +name = "sea-query-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd78f2e0ee8e537e9195d1049b752e0433e2cac125426bccb7b5c3e508096117" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", + "thiserror", +] + +[[package]] +name = "sea-schema" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd9561232bd1b82ea748b581f15909d11de0db6563ddcf28c5d908aee8282f1" +dependencies = [ + "futures", + "sea-query", + "sea-schema-derive", +] + +[[package]] +name = "sea-schema-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6f686050f76bffc4f635cda8aea6df5548666b830b52387e8bc7de11056d11e" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "seahash" version = "4.1.0" @@ -8585,7 +9214,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -8601,9 +9230,9 @@ dependencies = [ 
[[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -8646,7 +9275,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -8682,34 +9311,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.13.1", - "chrono", - "hex", - "indexmap 1.9.3", - "serde", - "serde_json", - "serde_with_macros 2.3.3", - "time", -] - -[[package]] -name = "serde_with" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" -dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "chrono", "hex", "indexmap 1.9.3", "indexmap 2.0.0", "serde", "serde_json", - "serde_with_macros 3.3.0", + "serde_with_macros 3.4.0", "time", ] @@ -8727,26 +9340,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" -dependencies = [ - "darling 0.20.3", - "proc-macro2", - "quote", - "syn 2.0.32", -] - -[[package]] -name = "serde_with_macros" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" +checksum = 
"93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -8784,7 +9385,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -8795,7 +9396,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -8804,19 +9405,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.7" @@ -8825,7 +9413,7 @@ checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -8900,38 +9488,33 @@ dependencies = [ [[package]] name = "signatory" -version = "0.23.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfecc059e81632eef1dd9b79e22fc28b8fe69b30d3357512a77a0ad8ee3c782" +checksum = "c1e303f8205714074f6068773f0e29527e0453937fe837c9717d066635b65f31" dependencies = [ - "pkcs8 0.7.6", - "rand_core 0.6.4", - "signature 1.6.4", + "pkcs8", + "rand_core", + "signature", "zeroize", ] [[package]] name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" - -[[package]] -name = "signature" -version = "2.1.0" +version = "2.0.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d" dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", + "digest", + "rand_core", ] [[package]] name = "simd-json" -version = "0.10.6" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de7f1293f0e4e11d52e588766fe9de8caa2857ff63809d40de83245452ca7c5c" +checksum = "f0f07a84c7456b901b8dd2c1d44caca8b0fd2c2616206ee5acc9d9da61e8d9ec" dependencies = [ + "getrandom", "halfbrown", "lexical-core", "serde", @@ -8948,9 +9531,9 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" [[package]] name = "similar" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" +checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" [[package]] name = "simple_asn1" @@ -9033,9 +9616,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" dependencies = [ "serde", ] @@ -9091,15 +9674,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" -dependencies = [ - "der 0.4.5", -] - [[package]] name = "spki" version = "0.7.2" @@ -9107,7 +9681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.8", + 
"der", ] [[package]] @@ -9116,27 +9690,254 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" +[[package]] +name = "sqlformat" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" +dependencies = [ + "itertools 0.11.0", + "nom", + "unicode_categories", +] + [[package]] name = "sqllogictest" -version = "0.15.3" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee18b0100bc1e1a6d1f9aa242b263c34d3f475f3a2de49da2affa6c00223a2ec" +checksum = "c711f88532f6c84d912ecd2ae9d8bdf48c4780ee88d257e0301dbfb151a1b033" dependencies = [ "async-trait", "educe", "fs-err", "futures", "glob", - "humantime 2.1.0", + "humantime", "itertools 0.11.0", "libtest-mimic", "md-5", "owo-colors", "regex", "similar", + "subst", + "tempfile", + "thiserror", + "tracing", +] + +[[package]] +name = "sqlx" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e58421b6bc416714d5115a2ca953718f6c621a51b68e4f4922aea5a4391a721" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd4cef4251aabbae751a3710927945901ee1d97ee96d757f6880ebb9a79bfd53" +dependencies = [ + "ahash 0.8.3", + "atoi", + "bigdecimal", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap 2.0.0", + "log", + "memchr", + "native-tls", + "once_cell", + "paste", + "percent-encoding", + "rust_decimal", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", 
+ "thiserror", + "time", + "tokio", + "tokio-stream", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "208e3165167afd7f3881b16c1ef3f2af69fa75980897aac8874a0696516d12c2" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a4a8336d278c62231d87f24e8a7a74898156e34c1c18942857be2acb29c7dfc" +dependencies = [ + "dotenvy", + "either", + "heck 0.4.1", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca69bf415b93b60b80dc8fda3cb4ef52b2336614d8da2de5456cc942a110482" +dependencies = [ + "atoi", + "base64 0.21.4", + "bigdecimal", + "bitflags 2.4.0", + "byteorder", + "bytes", + "chrono", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "rust_decimal", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", "thiserror", + "time", "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0db2df1b8731c3651e204629dd55e52adbae0462fa1bdcbed56a2302c18181e" +dependencies = [ + "atoi", + "base64 0.21.4", + "bigdecimal", + "bitflags 2.4.0", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + 
"futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "num-bigint", + "once_cell", + "rand", + "rust_decimal", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "time", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4c21bf34c7cae5b283efb3ac1bcc7670df7561124dc2f8bdc0b59be40f79a2" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "time", + "tracing", + "url", + "uuid", ] [[package]] @@ -9168,12 +9969,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "strsim" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" - [[package]] name = "strsim" version = "0.10.0" @@ -9189,7 +9984,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -9200,7 +9995,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -9241,7 +10036,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -9254,6 +10049,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "subst" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca1318e5d6716d6541696727c88d9b8dfc8cfe6afd6908e186546fd4af7f5b98" +dependencies = [ + "memchr", + "unicode-width", +] + [[package]] name = "subtle" version = "2.5.0" @@ -9296,9 +10101,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" 
+version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -9365,9 +10170,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.11" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" +checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" [[package]] name = "task_stats_alloc" @@ -9405,15 +10210,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" -[[package]] -name = "textwrap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" version = "1.0.48" @@ -9431,7 +10227,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -9478,7 +10274,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" version = "0.5.4" -source = "git+https://github.com/risingwavelabs/jemallocator.git?rev=b7f9f3#b7f9f34664dcfea190e64bef64587e23f9f2710c" +source = "git+https://github.com/risingwavelabs/jemallocator.git?rev=64a2d9#64a2d988d687a94cd859855e19241cd8b0705466" dependencies = [ "libc", "paste", @@ -9488,7 +10284,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" version = "0.5.4+5.3.0-patched" -source = "git+https://github.com/risingwavelabs/jemallocator.git?rev=b7f9f3#b7f9f34664dcfea190e64bef64587e23f9f2710c" +source = 
"git+https://github.com/risingwavelabs/jemallocator.git?rev=64a2d9#64a2d988d687a94cd859855e19241cd8b0705466" dependencies = [ "cc", "libc", @@ -9497,7 +10293,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.5.4" -source = "git+https://github.com/risingwavelabs/jemallocator.git?rev=b7f9f3#b7f9f34664dcfea190e64bef64587e23f9f2710c" +source = "git+https://github.com/risingwavelabs/jemallocator.git?rev=64a2d9#64a2d988d687a94cd859855e19241cd8b0705466" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -9505,14 +10301,15 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", "libc", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -9520,15 +10317,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -9605,7 +10402,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -9644,8 +10441,8 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.8" -source = 
"git+https://github.com/madsim-rs/rust-postgres.git?rev=4538cd6#4538cd6bacd66909a56a8aa3aa3fd7b0b52545b3" +version = "0.7.10" +source = "git+https://github.com/madsim-rs/rust-postgres.git?rev=ac00d88#ac00d8866b8abeede7747587956ef11766b5902f" dependencies = [ "async-trait", "byteorder", @@ -9661,8 +10458,10 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", + "rand", "socket2 0.5.3", "tokio-util", + "whoami", ] [[package]] @@ -9708,9 +10507,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -9738,7 +10537,19 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "toml" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.20.2", ] [[package]] @@ -9763,6 +10574,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -9772,7 +10596,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.3", + "base64 0.21.4", "bytes", "flate2", "futures-core", @@ -9784,7 +10608,7 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost", + "prost 0.11.9", "rustls-pemfile", "tokio", "tokio-rustls 0.24.1", @@ -9796,17 +10620,44 @@ 
dependencies = [ "webpki-roots 0.23.1", ] +[[package]] +name = "tonic" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.4", + "bytes", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.12.1", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic-build" -version = "0.9.2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" +checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" dependencies = [ - "prettyplease", + "prettyplease 0.2.15", "proc-macro2", - "prost-build", + "prost-build 0.12.1", "quote", - "syn 1.0.109", + "syn 2.0.37", ] [[package]] @@ -9887,7 +10738,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -10098,6 +10949,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "unsafe-libyaml" version = "0.2.9" @@ -10136,9 +10993,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = 
"88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" dependencies = [ "getrandom", "rand", @@ -10151,6 +11008,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "value-bag" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" + [[package]] name = "value-trait" version = "0.6.1" @@ -10169,17 +11032,11 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "vergen" -version = "8.2.4" +version = "8.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" +checksum = "85e7dc29b3c54a2ea67ef4f953d5ec0c4085035c0ae2d325be1c0d2144bd9f16" dependencies = [ "anyhow", "rustversion", @@ -10246,9 +11103,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi-cap-std-sync" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291862f1014dd7e674f93b263d57399de4dd1907ea37e74cf7d36454536ba2f0" +checksum = "e2fe3aaf51c1e1a04a490e89f0a9cab789d21a496c0ce398d49a24f8df883a58" dependencies = [ "anyhow", "async-trait", @@ -10270,9 +11127,9 @@ dependencies = [ [[package]] name = "wasi-common" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b422ae2403cae9ca603864272a402cf5001dd6fef8632e090e00c4fb475741b" +checksum = 
"e74e9a2c8bfda59870a8bff38a31b9ba80b6fdb7abdfd2487177b85537d2e8a8" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -10309,7 +11166,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -10343,7 +11200,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10365,9 +11222,9 @@ dependencies = [ [[package]] name = "wasm-encoder" -version = "0.32.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba64e81215916eaeb48fee292f29401d69235d62d8b8fd92a7b2844ec5ae5f7" +checksum = "9ca90ba1b5b0a70d3d49473c5579951f3bddc78d47b59256d2f9d4922b150aca" dependencies = [ "leb128", ] @@ -10397,9 +11254,9 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.112.0" +version = "0.115.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e986b010f47fcce49cf8ea5d5f9e5d2737832f12b53ae8ae785bbe895d0877bf" +checksum = "e06c0641a4add879ba71ccb3a1e4278fd546f76f1eafb21d8f7b07733b547cd5" dependencies = [ "indexmap 2.0.0", "semver", @@ -10407,19 +11264,19 @@ dependencies = [ [[package]] name = "wasmprinter" -version = "0.2.64" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ddf5892036cd4b780d505eff1194a0cbc10ed896097656fdcea3744b5e7c2f" +checksum = "e74458a9bc5cc9c7108abfa0fe4dc88d5abf1f3baf194df3264985f17d559b5e" dependencies = [ "anyhow", - "wasmparser 0.112.0", + "wasmparser 0.115.0", ] [[package]] name = "wasmtime" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd02b992d828b91efaf2a7499b21205fe4ab3002e401e3fe0f227aaeb4001d93" +checksum = "1bc104ced94ff0a6981bde77a0bc29aab4af279914a4143b8d1af9fd4b2c9d41" dependencies = [ "anyhow", "async-trait", @@ -10455,28 +11312,28 @@ 
dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284466ef356ce2d909bc0ad470b60c4d0df5df2de9084457e118131b3c779b92" +checksum = "d2b28e5661a9b5f7610a62ab3c69222fa161f7bd31d04529e856461d8c3e706b" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc78cfe1a758d1336f447a47af6ec05e0df2c03c93440d70faf80e17fbb001e" +checksum = "3f58ddfe801df3886feaf466d883ea37e941bcc6d841b9f644a08c7acabfe7f8" dependencies = [ "anyhow", - "base64 0.21.3", + "base64 0.21.4", "bincode 1.3.3", "directories-next", "file-per-thread-logger", "log", "rustix 0.37.23", "serde", - "sha2 0.10.7", + "sha2", "toml 0.5.11", "windows-sys 0.48.0", "zstd 0.11.2+zstd.1.5.2", @@ -10484,9 +11341,9 @@ dependencies = [ [[package]] name = "wasmtime-component-macro" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e916103436a6d84faa4c2083e2e98612a323c2cc6147ec419124f67c764c9c" +checksum = "39725d9633fb064bd3a6d83c5ea5077289256de0862d3d96295822edb13419c0" dependencies = [ "anyhow", "proc-macro2", @@ -10499,15 +11356,15 @@ dependencies = [ [[package]] name = "wasmtime-component-util" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f20a5135ec5ef01080e674979b02d6fa5eebaa2b0c2d6660513ee9956a1bf624" +checksum = "1153feafc824f95dc69472cb89a3396b3b05381f781a7508b01840f9df7b1a51" [[package]] name = "wasmtime-cranelift" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1aa99cbf3f8edb5ad8408ba380f5ab481528ecd8a5053acf758e006d6727fd" +checksum = "4fc1e39ce9aa0fa0b319541ed423960b06cfa7343eca1574f811ea34275739c2" dependencies = [ "anyhow", "cranelift-codegen", @@ 
-10528,9 +11385,9 @@ dependencies = [ [[package]] name = "wasmtime-cranelift-shared" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce31fd55978601acc103acbb8a26f81c89a6eae12d3a1c59f34151dfa609484" +checksum = "2dd32739326690e51c76551d7cbf29d371e7de4dc7b37d2d503be314ab5b7d04" dependencies = [ "anyhow", "cranelift-codegen", @@ -10544,9 +11401,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f9e58e0ee7d43ff13e75375c726b16bce022db798d3a099a65eeaa7d7a544b" +checksum = "32b60e4ae5c9ae81750d8bc59110bf25444aa1d9266c19999c3b64b801db3c73" dependencies = [ "anyhow", "cranelift-entity", @@ -10566,9 +11423,9 @@ dependencies = [ [[package]] name = "wasmtime-fiber" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14309cbdf2c395258b124a24757c727403070c0465a28bcc780c4f82f4bca5ff" +checksum = "7dd40c8d869916ee6b1f3fcf1858c52041445475ca8550aee81c684c0eb530ca" dependencies = [ "cc", "cfg-if", @@ -10579,9 +11436,9 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0f2eaeb01bb67266416507829bd8e0bb60278444e4cbd048e280833ebeaa02" +checksum = "655b23a10eddfe7814feb548a466f3f25aa4bb4f43098a147305c544a2de28e1" dependencies = [ "addr2line 0.19.0", "anyhow", @@ -10605,9 +11462,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42e59d62542bfb73ce30672db7eaf4084a60b434b688ac4f05b287d497de082" +checksum = "e46b7e98979a69d3df093076bde8431204e3c96a770e8d216fea365c627d88a4" dependencies = [ "object 0.30.4", "once_cell", @@ -10616,9 +11473,9 @@ dependencies = [ [[package]] name = 
"wasmtime-jit-icache-coherence" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b49ceb7e2105a8ebe5614d7bbab6f6ef137a284e371633af60b34925493081f" +checksum = "6fb1e7c68ede63dc7a98c3e473162954e224951854e229c8b4e74697fe17dbdd" dependencies = [ "cfg-if", "libc", @@ -10627,9 +11484,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a5de4762421b0b2b19e02111ca403632852b53e506e03b4b227ffb0fbfa63c2" +checksum = "843e33bf9e0f0c57902c87a1dea1389cc23865c65f007214318dbdfcb3fd4ae5" dependencies = [ "anyhow", "cc", @@ -10654,9 +11511,9 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbb7c138f797192f46afdd3ec16f85ef007c3bb45fa8e5174031f17b0be4c4a" +checksum = "7473a07bebd85671bada453123e3d465c8e0a59668ff79f5004076e6a2235ef5" dependencies = [ "cranelift-entity", "serde", @@ -10666,9 +11523,9 @@ dependencies = [ [[package]] name = "wasmtime-wasi" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01686e859249d4dffe3d7ce9957ae35bcf4161709dfafd165ee136bd54d179f1" +checksum = "aff7b3b3272ad5b4ba63c9aac6248da6f06a8227d0c0d6017d89225d794e966c" dependencies = [ "anyhow", "async-trait", @@ -10693,9 +11550,9 @@ dependencies = [ [[package]] name = "wasmtime-winch" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60160d8f7d2b301790730dac8ff25156c61d4fed79481e7074c21dd1283cfe2f" +checksum = "351c9d4e60658dd0cf616c12c5508f86cc2cefcc0cff307eed0a31b23d3c0b70" dependencies = [ "anyhow", "cranelift-codegen", @@ -10710,9 +11567,9 @@ dependencies = [ [[package]] name = "wasmtime-wit-bindgen" -version = "10.0.1" +version = "10.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3334b0466a4d340de345cda83474d1d2c429770c3d667877971407672bc618a" +checksum = "f114407efbd09e4ef67053b6ae54c16455a821ef2f6096597fcba83b7625e59c" dependencies = [ "anyhow", "heck 0.4.1", @@ -10730,23 +11587,23 @@ dependencies = [ [[package]] name = "wast" -version = "64.0.0" +version = "66.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a259b226fd6910225aa7baeba82f9d9933b6d00f2ce1b49b80fa4214328237cc" +checksum = "93cb43b0ac6dd156f2c375735ccfd72b012a7c0a6e6d09503499b8d3cb6e6072" dependencies = [ "leb128", "memchr", "unicode-width", - "wasm-encoder 0.32.0", + "wasm-encoder 0.35.0", ] [[package]] name = "wat" -version = "1.0.71" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53253d920ab413fca1c7dc2161d601c79b4fdf631d0ba51dd4343bf9b556c3f6" +checksum = "e367582095d2903caeeea9acbb140e1db9c7677001efa4347c3687fd34fe7072" dependencies = [ - "wast 64.0.0", + "wast 66.0.2", ] [[package]] @@ -10761,9 +11618,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", @@ -10796,11 +11653,21 @@ dependencies = [ "rustix 0.38.11", ] +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wiggle" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea93d31f59f2b2fa4196990b684771500072d385eaac12587c63db2bc185d705" +checksum = 
"e63f150c6e39ef29a58139564c5ed7a0ef34d6df8a8eecd4233af85a576968d9" dependencies = [ "anyhow", "async-trait", @@ -10813,9 +11680,9 @@ dependencies = [ [[package]] name = "wiggle-generate" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df96ee6bea595fabf0346c08c553f684b08e88fad6fdb125e6efde047024f7b" +checksum = "5f31e961fb0a5ad3ff10689c85f327f4abf10b4cac033b9d7372ccbb106aea24" dependencies = [ "anyhow", "heck 0.4.1", @@ -10828,9 +11695,9 @@ dependencies = [ [[package]] name = "wiggle-macro" -version = "10.0.1" +version = "10.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8649011a011ecca6197c4db6ee630735062ba20595ea56ce58529b3b1c20aa2f" +checksum = "4a28ae3d6b90f212beca7fab5910d0a3b1a171290c06eaa81bb39f41e6f74589" dependencies = [ "proc-macro2", "quote", @@ -10871,9 +11738,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winch-codegen" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525fdd0d4e82d1bd3083bd87e8ca8014abfbdc5bf290d1d5371dac440d351e89" +checksum = "b1bf2ac354be169bb201de7867b84f45d91d0ef812f67f11c33f74a7f5a24e56" dependencies = [ "anyhow", "cranelift-codegen", @@ -11096,7 +11963,7 @@ dependencies = [ [[package]] name = "workspace-config" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "log", "openssl-sys", @@ -11107,28 +11974,32 @@ dependencies = [ [[package]] name = "workspace-hack" -version = "1.1.0-alpha" +version = "1.3.0-alpha" dependencies = [ "ahash 0.8.3", + "allocator-api2", "anyhow", + "async-std", "auto_enums", "aws-credential-types", "aws-sdk-s3", "aws-smithy-client", - "base64 0.21.3", + "base64 0.21.4", + "bit-vec", "bitflags 2.4.0", "byteorder", "bytes", "cc", "chrono", - "clap 4.4.2", + "clap", "clap_builder", "combine", "crc32fast", "crossbeam-epoch", + "crossbeam-queue", "crossbeam-utils", 
"deranged", - "digest 0.10.7", + "digest", "either", "fail", "fallible-iterator", @@ -11148,6 +12019,9 @@ dependencies = [ "hyper", "indexmap 1.9.3", "itertools 0.10.5", + "itertools 0.11.0", + "jni", + "lazy_static", "lexical-core", "lexical-parse-float", "lexical-parse-integer", @@ -11159,14 +12033,16 @@ dependencies = [ "log", "madsim-rdkafka", "madsim-tokio", + "md-5", "mio", - "multimap", "nom", "num-bigint", "num-integer", + "num-iter", "num-traits", "opentelemetry_api", "opentelemetry_sdk", + "ordered-float 3.9.1", "parking_lot 0.12.1", "parking_lot_core 0.9.8", "petgraph", @@ -11175,27 +12051,43 @@ dependencies = [ "postgres-types", "proc-macro2", "prometheus", - "prost", + "prost 0.11.9", + "prost 0.12.1", + "prost-types 0.12.1", "rand", "rand_chacha", - "rand_core 0.6.4", + "rand_core", + "redis", "regex", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.1", + "regex-syntax 0.8.0", "reqwest", "ring", "rust_decimal", "rustc-hash", - "rustix 0.37.23", + "rustix 0.38.11", + "rustls 0.21.7", "scopeguard", + "sea-orm", + "sea-query", + "sea-query-binder", "serde", "serde_json", - "serde_with 3.3.0", + "serde_with 3.4.0", + "sha1", + "sha2", + "signature", "smallvec", + "sqlx", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", "stable_deref_trait", + "strum 0.25.0", "subtle", "syn 1.0.109", - "syn 2.0.32", + "syn 2.0.37", "time", "time-macros", "tinyvec", @@ -11204,8 +12096,9 @@ dependencies = [ "tokio-stream", "tokio-util", "toml_datetime", - "toml_edit", - "tonic", + "toml_edit 0.19.15", + "tonic 0.10.2", + "tonic 0.9.2", "tower", "tracing", "tracing-core", @@ -11214,7 +12107,7 @@ dependencies = [ "unicode-normalization", "url", "uuid", - "zeroize", + "whoami", ] [[package]] @@ -11234,9 +12127,9 @@ checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" [[package]] name = "xorf" -version = "0.8.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"57901b00e3f8e14f4d20b8955bf8087ecb545cfe2ed8741c2a2dbc89847a1a29" +checksum = "7d36478bcf71152a2f9f6cf9bc48273333f32780c769ef90e13d464ab778db5f" dependencies = [ "libm", "rand", @@ -11290,7 +12183,7 @@ checksum = "56097d5b91d711293a42be9289403896b68654625021732067eac7a4ca388a1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -11298,20 +12191,6 @@ name = "zeroize" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.32", -] [[package]] name = "zstd" @@ -11331,6 +12210,15 @@ dependencies = [ "zstd-safe 6.0.6", ] +[[package]] +name = "zstd" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +dependencies = [ + "zstd-safe 7.0.0", +] + [[package]] name = "zstd-safe" version = "5.0.2+zstd.1.5.2" @@ -11351,6 +12239,15 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" diff --git a/Cargo.toml b/Cargo.toml index 0bd3096b0467b..f8d7f6f88bfde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,16 +7,21 @@ members = [ "src/cmd_all", "src/common", "src/common/common_service", + "src/common/heap_profiling", "src/compute", "src/connector", "src/ctl", - "src/expr", + "src/expr/core", + "src/expr/impl", "src/expr/macro", "src/frontend", 
"src/frontend/planner_test", "src/java_binding", "src/jni_core", "src/meta", + "src/meta/node", + "src/meta/service", + "src/meta/src/model_v2/migration", "src/object_store", "src/prost", "src/prost/helpers", @@ -28,7 +33,6 @@ members = [ "src/sqlparser/test_runner", "src/storage", "src/storage/backup", - "src/storage/backup/cmd", "src/storage/compactor", "src/storage/hummock_sdk", "src/storage/hummock_test", @@ -52,7 +56,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.1.0-alpha" +version = "1.3.0-alpha" edition = "2021" homepage = "https://github.com/risingwavelabs/risingwave" keywords = ["sql", "database", "streaming"] @@ -92,10 +96,10 @@ aws-smithy-http = "0.55" aws-smithy-types = "0.55" aws-endpoint = "0.55" aws-types = "0.55" -etcd-client = { package = "madsim-etcd-client", version = "0.3" } -futures-async-stream = "0.2" +etcd-client = { package = "madsim-etcd-client", version = "0.4" } +futures-async-stream = "0.2.9" hytra = "0.1" -rdkafka = { package = "madsim-rdkafka", git = "https://github.com/madsim-rs/madsim.git", rev = "bb8f063", features = [ +rdkafka = { package = "madsim-rdkafka", version = "0.3.0", features = [ "cmake-build", ] } hashbrown = { version = "0.14.0", features = [ @@ -104,31 +108,43 @@ hashbrown = { version = "0.14.0", features = [ "nightly", ] } criterion = { version = "0.5", features = ["async_futures"] } -tonic = { package = "madsim-tonic", version = "0.3.1" } -tonic-build = { package = "madsim-tonic-build", version = "0.3.1" } -icelake = { git = "https://github.com/icelake-io/icelake", rev = "a6790d17094754959e351fac1e11147e37643e97" } -arrow-array = "46" -arrow-schema = "46" -arrow-buffer = "46" -arrow-flight = "46" -arrow-select = "46" -arrow-ipc = "46" +tonic = { package = "madsim-tonic", version = "0.4.0" } +tonic-build = { package = "madsim-tonic-build", version = "0.4.2" } +prost = { version = "0.12" } +icelake = { git = "https://github.com/icelake-io/icelake", rev = "16dab0e36ab337e58ee8002d828def2d212fa116" } 
+arrow-array = "47" +arrow-cast = "47" +arrow-schema = "47" +arrow-buffer = "47" +arrow-flight = "47" +arrow-select = "47" +arrow-ord = "47" +tikv-jemalloc-ctl = { git = "https://github.com/risingwavelabs/jemallocator.git", rev = "64a2d9" } +tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git", features = [ + "profiling", + "stats", +], rev = "64a2d9" } +arrow-ipc = "47" risingwave_backup = { path = "./src/storage/backup" } risingwave_batch = { path = "./src/batch" } risingwave_cmd = { path = "./src/cmd" } risingwave_common = { path = "./src/common" } risingwave_common_service = { path = "./src/common/common_service" } +risingwave_common_heap_profiling = { path = "./src/common/heap_profiling" } risingwave_compactor = { path = "./src/storage/compactor" } risingwave_compute = { path = "./src/compute" } risingwave_ctl = { path = "./src/ctl" } risingwave_connector = { path = "./src/connector" } -risingwave_expr = { path = "./src/expr" } +risingwave_expr = { path = "./src/expr/core" } +risingwave_expr_impl = { path = "./src/expr/impl" } risingwave_frontend = { path = "./src/frontend" } risingwave_hummock_sdk = { path = "./src/storage/hummock_sdk" } risingwave_hummock_test = { path = "./src/storage/hummock_test" } risingwave_hummock_trace = { path = "./src/storage/hummock_trace" } risingwave_meta = { path = "./src/meta" } +risingwave_meta_service = { path = "./src/meta/service" } +risingwave_meta_node = { path = "./src/meta/node" } risingwave_object_store = { path = "./src/object_store" } risingwave_pb = { path = "./src/prost" } risingwave_rpc_client = { path = "./src/rpc_client" } @@ -150,6 +166,8 @@ unused_must_use = "forbid" future_incompatible = "warn" nonstandard_style = "warn" rust_2018_idioms = "warn" +# Backward compatibility is not important for an application. 
+async_fn_in_trait = "allow" [workspace.lints.clippy] uninlined_format_args = "allow" @@ -174,8 +192,9 @@ redundant_explicit_links = "allow" lto = 'off' [profile.release] -debug = 1 -lto = 'thin' +debug = "full" +split-debuginfo = "packed" +lto = "thin" # The profile used for CI in main branch. # This profile inherits from the release profile, but turns on some checks and assertions for us to @@ -184,6 +203,7 @@ lto = 'thin' inherits = "release" incremental = false debug = "line-tables-only" +split-debuginfo = "off" debug-assertions = true overflow-checks = true @@ -212,12 +232,17 @@ opt-level = 2 incremental = false debug = 1 -# Patch third-party crates for deterministic simulation. [patch.crates-io] +# Patch third-party crates for deterministic simulation. quanta = { git = "https://github.com/madsim-rs/quanta.git", rev = "948bdc3" } getrandom = { git = "https://github.com/madsim-rs/getrandom.git", rev = "8daf97e" } tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "fe39bb8e" } tokio-retry = { git = "https://github.com/madsim-rs/rust-tokio-retry.git", rev = "95e2fd3" } -tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "4538cd6" } +tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "ac00d88" } # patch: unlimit 4MB message size for grpc client -etcd-client = { git = "https://github.com/risingwavelabs/etcd-client.git", rev = "d55550a" } +etcd-client = { git = "https://github.com/risingwavelabs/etcd-client.git", rev = "4e84d40" } + +# Patch for coverage_attribute. 
+# https://github.com/sgodwincs/dlv-list-rs/pull/19#issuecomment-1774786289 +dlv-list = { git = "https://github.com/sgodwincs/dlv-list-rs.git", rev = "5bbc5d0" } +ordered-multimap = { git = "https://github.com/risingwavelabs/ordered-multimap-rs.git", rev = "19c743f" } diff --git a/Makefile.toml b/Makefile.toml index 42ce20c0769ed..86e9cea136bb5 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -24,6 +24,7 @@ env_scripts = [ #!@duckscript # only duckscript can modify env variables in cargo-make +# duckscript doc: https://github.com/sagiegurari/duckscript/blob/master/docs/sdk.md set_env ENABLE_TELEMETRY "false" @@ -70,6 +71,26 @@ else set_env BUILD_HUMMOCK_TRACE_CMD "" end +is_ci = get_env RISINGWAVE_CI +is_not_ci = not ${is_ci} + +if ${is_not_ci} + query_log_path = get_env RW_QUERY_LOG_PATH + no_query_log_path = not ${query_log_path} + + if ${no_query_log_path} + set_env RW_QUERY_LOG_PATH "${PREFIX_LOG}" + fi + + rust_log = get_env RUST_LOG + no_rust_log = not ${rust_log} + + if ${no_rust_log} + set_env RUST_LOG "pgwire_query_log=info" + else + set_env RUST_LOG "pgwire_query_log=info,${rust_log}" + end +end ''', ] @@ -281,6 +302,50 @@ ln -s "$(pwd)/target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/risingwave" "$ ln -s "$(pwd)/target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/risingwave" "${PREFIX_BIN}/risingwave/standalone" ''' +[tasks.codesign-playground] +private = true +category = "RiseDev - Build" +description = "Codesign playground binary to support coredump" +# If core dump is enabled by RiseDev and we're on an Apple Silicon platform, +# codesign the binary before running. 
+# https://developer.apple.com/forums/thread/694233?answerId=695943022#695943022 +condition = { env_set = [ + "ENABLE_COREDUMP", +], env = { "SYSTEM" = "darwin-arm64" } } +script = ''' +#!/usr/bin/env bash + +set -ex +codesign -s - -f --entitlements scripts/coredump/coredump.entitlements "target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/risingwave" +''' + +[tasks.codesign-binaries] +private = true +category = "RiseDev - Build" +description = "Codesign all binaries to support coredump" +# If core dump is enabled by RiseDev and we're on an Apple Silicon platform, +# codesign the binary before running. +# https://developer.apple.com/forums/thread/694233?answerId=695943022#695943022 +condition = { env_set = [ + "ENABLE_COREDUMP", +], env = { "SYSTEM" = "darwin-arm64" } } +script = ''' +#!/usr/bin/env bash +set -e + +binaries=() + +if [[ "$ENABLE_ALL_IN_ONE" == "true" ]]; then + binaries=("risingwave") +else + binaries=("meta-node" "compute-node" "frontend" "compactor") +fi + +set -ex +echo -n "${binaries[*]}" | parallel -d ' ' \ + "codesign -s - -f --entitlements scripts/coredump/coredump.entitlements \"target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/{}\"" +''' + [tasks.link-user-bin] private = true category = "RiseDev - Build" @@ -307,6 +372,7 @@ dependencies = [ "link-standalone-binaries", "link-all-in-one-binaries", "link-user-bin", + "codesign-binaries", ] [tasks.b] @@ -319,7 +385,7 @@ alias = "build-risingwave" private = true category = "RiseDev - Build" description = "Extract dashboard artifact" -condition = { env_not_set = ["ENABLE_BUILD_DASHBOARD_V2"] } +condition = { env_not_set = ["ENABLE_BUILD_DASHBOARD"] } script = ''' #!/usr/bin/env bash @@ -332,11 +398,20 @@ git worktree prune git worktree add "${PREFIX_UI}" origin/dashboard-artifact ''' -[tasks.export-dashboard-v2] +[tasks.export-dashboard] private = true category = "RiseDev - Build" -description = "Build dashboard v2" -condition = { env_set = ["ENABLE_BUILD_DASHBOARD_V2"] } +description = "Build 
dashboard" +condition = { env_set = [ + "ENABLE_BUILD_DASHBOARD", +], files_modified = { input = [ + "./dashboard/**/*.js", + "./dashboard/**/*.ts*", + "./dashboard/package.json", + "./dashboard/next.config.js", +], output = [ + "./dashboard/out/**/*.html", +] } } script = """ #!/usr/bin/env bash set -e @@ -366,9 +441,33 @@ ${BUILD_HUMMOCK_TRACE_CMD}\ ${RISEDEV_CARGO_BUILD_EXTRA_ARGS} ''' +[tasks.build-risingwave-playground] +category = "RiseDev - Build" +description = "Build RisingWave playground" +condition = { env_true = ["ENABLE_BUILD_RUST"] } +script = ''' +#!/usr/bin/env bash + +set -e +[[ -z "${RISEDEV_RUSTFLAGS}" ]] || export RUSTFLAGS="${RISEDEV_RUSTFLAGS}" +echo + RUSTFLAGS="${RUSTFLAGS:-}" +set -xe + +cargo build -p risingwave_cmd_all \ + --profile "${RISINGWAVE_BUILD_PROFILE}" \ + ${RISINGWAVE_FEATURE_FLAGS} \ + ${RISEDEV_CARGO_BUILD_EXTRA_ARGS} +''' + [tasks.clean] private = true category = "RiseDev - Build" +description = "Clean all build targets" +dependencies = ["clean-rust", "clean-java"] + +[tasks.clean-rust] +private = true +category = "RiseDev - Build" description = "Clean Rust targets" condition = { env_set = ["ENABLE_BUILD_RUST"] } script = ''' @@ -377,6 +476,25 @@ set -e cargo clean ''' +[tasks.clean-java] +private = true +category = "RiseDev - Build" +description = "Clean Java targets" +condition = { env_set = ["ENABLE_RW_CONNECTOR", "ENABLE_BUILD_RW_CONNECTOR"] } +script = ''' +#!/usr/bin/env bash +set -e + +if command -v mvn &> /dev/null; then + MAVEN_PATH="$(command -v mvn)" +else + MAVEN_PATH="${PREFIX_BIN}/maven/bin/mvn" +fi + +cd "${JAVA_DIR}" +"${MAVEN_PATH}" clean +''' + [tasks.build-docs] private = true category = "RiseDev - Build" @@ -460,7 +578,7 @@ dependencies = [ "build-connector-node", "post-build-risingwave", "extract-dashboard-artifact", - "export-dashboard-v2", + "export-dashboard", "prepare-config", ] @@ -478,6 +596,15 @@ dependencies = [ "download-redis", ] +[tasks.pre-start-playground] +category = "RiseDev - Prepare" 
+description = "Preparation steps for playground" +dependencies = [ + "build-risingwave-playground", + "codesign-playground", + "build-connector-node", +] + [tasks.check-risedev-env-file] private = true category = "RiseDev - Prepare" @@ -521,33 +648,35 @@ alias = "playground" [tasks.playground] category = "RiseDev - Start/Stop" description = "🌟 Start a lite RisingWave playground using risingwave all-in-one binary" -dependencies = ["build-connector-node"] +dependencies = ["pre-start-playground"] script = ''' #!/usr/bin/env bash -set -ex +set -e +if [[ $ENABLE_COREDUMP == "true" ]]; then + echo "+ ulimit -c unlimited" + ulimit -c unlimited +fi -RUST_BACKTRACE=1 \ -cargo run -p risingwave_cmd_all \ - --profile "${RISINGWAVE_BUILD_PROFILE}" \ - ${RISINGWAVE_FEATURE_FLAGS} \ - -- playground +set -x +target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/risingwave playground $@ ''' [tasks.standalone] category = "RiseDev - Start/Stop" description = "🌟 Start a RisingWave standalone instance" -dependencies = ["build"] +dependencies = ["pre-start-playground"] script = ''' #!/usr/bin/env bash -set -euo pipefail +set -e +if [[ $ENABLE_COREDUMP == "true" ]]; then + echo "+ ulimit -c unlimited" + ulimit -c unlimited +fi -RUST_BACKTRACE=1 \ -cargo run -p risingwave_cmd_all \ - --profile "${RISINGWAVE_BUILD_PROFILE}" \ - ${RISINGWAVE_FEATURE_FLAGS} \ - -- standalone $@ +set -x +target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}/risingwave standalone $@ ''' # TODO(kwannoel): Support `tasks.standalone-dev` as well. 
@@ -570,8 +699,19 @@ alias = "dev" [tasks.dev] category = "RiseDev - Start/Stop" dependencies = ["pre-start-dev"] -script = "RUST_BACKTRACE=1 target/${BUILD_MODE_DIR}/risedev-dev ${@}" description = "🌟 Start a full RisingWave dev cluster using risedev-dev" +script = ''' +#!/usr/bin/env bash + +set -e +if [[ $ENABLE_COREDUMP == "true" ]]; then + echo "+ ulimit -c unlimited" + ulimit -c unlimited +fi + +set -x +target/${BUILD_MODE_DIR}/risedev-dev ${@} +''' [tasks.kill-risedev] category = "RiseDev - Start/Stop" @@ -700,6 +840,10 @@ echo echo "check: $(tput setaf 4)protoc >= 3.12.0$(tput sgr0)" protoc --version || echo "$(tput setaf 3)protoc$(tput sgr0) not found." echo + +echo "check: $(tput setaf 4)parallel >= 2022XXXX$(tput sgr0)" +parallel --version || echo "$(tput setaf 3)parallel$(tput sgr0) not found." +echo """ description = "Install (or upgrade) required tools to do pre-CI check and run e2e tests" @@ -758,7 +902,7 @@ TARGET_PATH="${JAVA_DIR}/connector-node/assembly/target/${ARTIFACT}" echo "Building connector node..." 
cd "${JAVA_DIR}" -"${MAVEN_PATH}" --batch-mode --update-snapshots package -Dmaven.test.skip +"${MAVEN_PATH}" --batch-mode --update-snapshots package -Dmaven.test.skip -Dno-build-rust rm -rf ${PREFIX_BIN}/connector-node mkdir -p "${PREFIX_BIN}/connector-node" @@ -857,7 +1001,18 @@ set -e cargo check \ --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ - -p risingwave_simulation --all-targets "$@" + -p risingwave_batch \ + -p risingwave_common \ + -p risingwave_compute \ + -p risingwave_connector \ + -p risingwave_frontend \ + -p risingwave_meta \ + -p risingwave_object_store \ + -p risingwave_source \ + -p risingwave_storage \ + -p risingwave_stream \ + -p pgwire \ + -p risingwave_simulation --tests "$@" """ [tasks.sslt] @@ -1162,7 +1317,6 @@ dependencies = ["k", "l", "check-logs", "wait-processes-exit"] description = "Kill cluster, dump logs and check logs" [tasks.wait-processes-exit] -private = true category = "Misc" description = "Wait for RisingWave processes to exit" script = """ @@ -1184,9 +1338,12 @@ echo "All processes has exited." 
[tasks.slt] category = "RiseDev - SQLLogicTest" -install_crate = { version = "0.15.3", crate_name = "sqllogictest-bin", binary = "sqllogictest", test_arg = [ +install_crate = { version = "0.17.1", crate_name = "sqllogictest-bin", binary = "sqllogictest", test_arg = [ "--help", ], install_command = "binstall" } +dependencies = ["check-risedev-env-file"] +env_files = ["${PREFIX_CONFIG}/risedev-env"] +env = { SLT_HOST = "${RW_FRONTEND_LISTEN_ADDRESS}", SLT_PORT = "${RW_FRONTEND_PORT}", SLT_DB = "dev" } command = "sqllogictest" args = ["${@}"] description = "🌟 Run SQLLogicTest" @@ -1288,4 +1445,16 @@ cargo run -p risingwave_common --bin example-config >> src/config/example.toml [tasks.backwards-compat-test] category = "RiseDev - Backwards Compatibility Test" description = "Run backwards compatibility test" -script = "./backwards-compat-tests/scripts/run_local.sh" \ No newline at end of file +script = "./backwards-compat-tests/scripts/run_local.sh" + +# For debugging. +# To show the env for a specific task, use `run_task = "show-env"` for that task. +[tasks.show-env] +private = true +description = "Show cargo-make runtime environment variables" +script = """ +#!@duckscript +# https://github.com/sagiegurari/cargo-make/issues/889 +vars = dump_variables +echo ${vars} +""" diff --git a/README.md b/README.md index d63369b0200a6..29a7d7e51888a 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,4 @@ +

@@ -5,23 +6,110 @@

-[![Slack](https://badgen.net/badge/Slack/Join%20RisingWave/0abd59?icon=slack)](https://join.slack.com/t/risingwave-community/shared_invite/zt-120rft0mr-d8uGk3d~NZiZAQWPnElOfw) -[![Build status](https://badge.buildkite.com/9394d2bca0f87e2e97aa78b25f765c92d4207c0b65e7f6648f.svg)](https://buildkite.com/risingwavelabs/main) -[![codecov](https://codecov.io/gh/risingwavelabs/risingwave/branch/main/graph/badge.svg?token=EB44K9K38B)](https://codecov.io/gh/risingwavelabs/risingwave) - -RisingWave is a distributed SQL streaming database. It is designed to reduce the complexity and cost of building stream processing applications. RisingWave consumes streaming data, performs incremental computations when new data comes in, and updates results dynamically. As a database system, RisingWave maintains results inside its own storage so that users can access data efficiently. -RisingWave offers wire compatibility with PostgreSQL and demonstrates exceptional performance surpassing the previous generation of stream processing systems, including Apache Flink, by several orders of magnitude. -It particularly excels in handling complex stateful operations like multi-stream joins. +
-RisingWave ingests data from sources like Apache Kafka, Apache Pulsar, Amazon Kinesis, Redpanda, and materialized CDC sources. Data in RisingWave can be delivered to external targets such as message brokers, data warehouses, and data lakes for storage or additional processing. +### 🌊Stream Processing Redefined. -RisingWave 1.0 is a battle-tested version that has undergone rigorous stress tests and performance evaluations. It has proven its reliability and efficiency through successful deployments in numerous production environments across dozens of companies. +
-Learn more at [Introduction to RisingWave](https://docs.risingwave.com/docs/current/intro/). +

+ Documentation   📑    + Hands-on Tutorials   🎯    + RisingWave Cloud   🚀    + + Get Instant Help + +

+
+ + Slack + + + Build status + + + codecov + +
+ +RisingWave is a distributed SQL streaming database that enables simple, efficient, and reliable processing of streaming data. ![RisingWave](https://github.com/risingwavelabs/risingwave-docs/blob/0f7e1302b22493ba3c1c48e78810750ce9a5ff42/docs/images/archi_simple.png) +## How to install +**Ubuntu** +``` +wget https://github.com/risingwavelabs/risingwave/releases/download/v1.3.0/risingwave-v1.3.0-x86_64-unknown-linux.tar.gz +tar xvf risingwave-v1.3.0-x86_64-unknown-linux.tar.gz +./risingwave playground +``` +**Mac** +``` +brew tap risingwavelabs/risingwave +brew install risingwave +risingwave playground +``` +Now connect to RisingWave using `psql`: +``` +psql -h localhost -p 4566 -d dev -U root +``` + +Learn more at [Quick Start](https://docs.risingwave.com/docs/current/get-started/). + +## Why RisingWave for stream processing? +RisingWave adeptly tackles some of the most challenging problems in stream processing. Compared to existing stream processing systems, RisingWave shines through with the following key features: +* **Easy to learn** + * RisingWave speaks PostgreSQL-style SQL, enabling users to dive into stream processing in much the same way as operating a PostgreSQL database. +* **Highly efficient in multi-stream joins** + * RisingWave has made significant optimizations for multiple stream join scenarios. Users can easily join 10-20 streams (or more) efficiently in a production environment. +* **High resource utilization** + * Queries in RisingWave leverage shared computational resources, eliminating the need for users to manually allocate resources for each query. +* **No compromise on large state management** + * The decoupled compute-storage architecture of RisingWave ensures remote persistence of internal states, and users never need to worry about the size of internal states when handling complex queries. +* **Transparent dynamic scaling** + * RisingWave supports near-instantaneous dynamic scaling without any service interruptions. 
+* **Instant failure recovery** + * RisingWave's state management mechanism allows it to recover from failure in seconds, not minutes or hours. +* **Easy to verify correctness** + * RisingWave persists results in materialized views and allows users to break down complex stream computation programs into stacked materialized views, simplifying program development and result verification. +* **Simplified data stack** + * RisingWave's ability to store data and serve queries eliminates the need for separate maintenance of stream processors and databases. Users can effortlessly connect RisingWave to their preferred BI tools or through client libraries. +* **Simple to maintain and operate** + * RisingWave abstracts away unnecessary low-level details, allowing users to concentrate solely on SQL code-level issues. +* **Rich ecosystem** + * With integrations to a diverse range of cloud systems and the PostgreSQL ecosystem, RisingWave boasts a rich and expansive ecosystem. + +## RisingWave's limitations +RisingWave isn’t a panacea for all data engineering hurdles. It has its own set of limitations: +* **No programmable interfaces** + * RisingWave does not provide low-level APIs in languages like Java and Scala, and does not allow users to manage internal states manually (unless you want to hack!). For coding in Java, Scala, and other languages, please consider using RisingWave's User-Defined Functions (UDF). +* **No support for transaction processing** + * RisingWave isn’t cut out for transactional workloads, thus it’s not a viable substitute for operational databases dedicated to transaction processing. However, it supports read-only transactions, ensuring data freshness and consistency. It also comprehends the transactional semantics of upstream database Change Data Capture (CDC). +* **Not tailored for ad-hoc analytical queries** + * RisingWave's row store design is tailored for optimal stream processing performance rather than interactive analytical workloads. 
Hence, it's not a suitable replacement for OLAP databases. Yet, a reliable integration with many OLAP databases exists, and a collaborative use of RisingWave and OLAP databases is a common practice among many users. + ## RisingWave Cloud @@ -29,23 +117,14 @@ RisingWave Cloud is a fully-managed and scalable stream processing platform powe ## Notes on telemetry -RisingWave collects anonymous usage statistics to better understand how the community is using RisingWave. The sole intention of this exercise is to help improve the product. These statistics are related to system resource usage, OS versions and system uptime. RisingWave doesn't have access to any user data or metadata running on RisingWave clusters including source and sink connection parameters, sources, sinks, materialized views, and tables. Users have the option to opt out of this collection using a system parameter. Please refer to the RisingWave user documentation for more details. - -## Get started - -- To learn about how to install and run RisingWave, see [Get started](https://docs.risingwave.com/docs/current/get-started/). -- To learn about how to ingest data and the supported data sources, see [Sources](https://docs.risingwave.com/docs/current/data-ingestion/). -- To learn about how to transform data using the PostgreSQL-compatible SQL of RisingWave, see [SQL reference](https://docs.risingwave.com/docs/current/sql-references/). -- To learn about how to deliver data and the supported data sinks, see [Sinks](https://docs.risingwave.com/docs/current/data-delivery/). -- To learn about new features and changes in the current and previous versions, see [Release notes](https://docs.risingwave.com/release-notes/). - -## Documentation +RisingWave collects anonymous usage statistics to better understand how the community is using RisingWave. The sole intention of this exercise is to help improve the product. Users may opt out easily at any time. 
Please refer to the [user documentation](https://docs.risingwave.com/docs/current/telemetry/) for more details. -To learn about how to use RisingWave, refer to [RisingWave User Documentation](https://docs.risingwave.com/). To learn about the development process, see the [developer guide](docs/developer-guide.md). To understand the design and implementation of RisingWave, refer to the design docs listed in [readme.md](docs/README.md). +## In-production use cases +Like other stream processing systems, the primary use cases of RisingWave include monitoring, alerting, real-time dashboard reporting, streaming ETL (Extract, Transform, Load), machine learning feature engineering, and more. It has already been adopted in fields such as financial trading, manufacturing, new media, logistics, gaming, and more. Check out [customer stories](https://www.risingwave.com/use-cases/). ## Community -Looking for help, discussions, collaboration opportunities, or a casual afternoon chat with our fellow engineers and community members? Join our [Slack workspace](https://join.slack.com/t/risingwave-community/shared_invite/zt-120rft0mr-d8uGk3d~NZiZAQWPnElOfw)! +Looking for help, discussions, collaboration opportunities, or a casual afternoon chat with our fellow engineers and community members? Join our [Slack workspace](https://risingwave.com/slack)! 
## License diff --git a/backwards-compat-tests/scripts/run_local.sh b/backwards-compat-tests/scripts/run_local.sh index cfa164ec35a29..bf2f09c6841a4 100755 --- a/backwards-compat-tests/scripts/run_local.sh +++ b/backwards-compat-tests/scripts/run_local.sh @@ -46,13 +46,14 @@ EOF setup_old_cluster() { echo "--- Setting up old cluster" + LATEST_BRANCH=$(git branch --show-current) git checkout "v${OLD_VERSION}-rc" } setup_new_cluster() { echo "--- Setting up new cluster" rm -r .risingwave/bin/risingwave - git checkout main + git checkout $LATEST_BRANCH } main() { diff --git a/backwards-compat-tests/slt/tpch-backwards-compat/seed.slt b/backwards-compat-tests/slt/tpch-backwards-compat/seed.slt index b56cefeab247e..9cb3b200f4f84 100644 --- a/backwards-compat-tests/slt/tpch-backwards-compat/seed.slt +++ b/backwards-compat-tests/slt/tpch-backwards-compat/seed.slt @@ -4,9 +4,6 @@ SET RW_IMPLICIT_FLUSH TO true; statement ok SET QUERY_MODE TO distributed; -statement ok -SET CREATE_COMPACTION_GROUP_FOR_MV TO true; - include ../tpch/create_tables.slt.part include ../tpch/insert_customer.slt.part diff --git a/ci/Dockerfile b/ci/Dockerfile index ec7607f0d62ba..7b2a13b38a5ce 100644 --- a/ci/Dockerfile +++ b/ci/Dockerfile @@ -7,7 +7,7 @@ RUN sed -i 's|http://archive.ubuntu.com/ubuntu|http://us-east-2.ec2.archive.ubun RUN apt-get update -yy && \ DEBIAN_FRONTEND=noninteractive apt-get -y install make build-essential cmake protobuf-compiler curl parallel python3 python3-pip \ openssl libssl-dev libsasl2-dev libcurl4-openssl-dev pkg-config bash openjdk-11-jdk wget unzip git tmux lld postgresql-client kafkacat netcat mysql-client \ - maven zstd libzstd-dev -yy \ + maven zstd libzstd-dev locales -yy \ && rm -rf /var/lib/{apt,dpkg,cache,log}/ SHELL ["/bin/bash", "-c"] @@ -42,8 +42,8 @@ ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo 
binstall -y --no-symlinks cargo-llvm-cov cargo-nextest cargo-hakari cargo-sort cargo-cache cargo-audit \ cargo-make@0.36.10 \ - sqllogictest-bin@0.15.3 \ - && cargo install sccache --locked \ + sqllogictest-bin@0.17.0 \ + && cargo install sccache \ && cargo cache -a \ && rm -rf "/root/.cargo/registry/index" \ && rm -rf "/root/.cargo/registry/cache" \ diff --git a/ci/build-ci-image.sh b/ci/build-ci-image.sh index 299b5d91878fe..59c88e5e9a9ae 100755 --- a/ci/build-ci-image.sh +++ b/ci/build-ci-image.sh @@ -13,7 +13,7 @@ cat ../rust-toolchain # !!! CHANGE THIS WHEN YOU WANT TO BUMP CI IMAGE !!! # # AND ALSO docker-compose.yml # ###################################################### -export BUILD_ENV_VERSION=v20230909 +export BUILD_ENV_VERSION=v20231022 export BUILD_TAG="public.ecr.aws/x5u3w5h6/rw-build-env:${BUILD_ENV_VERSION}" diff --git a/ci/docker-compose.yml b/ci/docker-compose.yml index d0cdbe4af816a..66dd2d175e675 100644 --- a/ci/docker-compose.yml +++ b/ci/docker-compose.yml @@ -71,7 +71,7 @@ services: retries: 5 source-test-env: - image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909 + image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231022 depends_on: - mysql - db @@ -81,22 +81,24 @@ services: - ..:/risingwave sink-test-env: - image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909 + image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231022 depends_on: - mysql - db + - message_queue - elasticsearch - clickhouse-server + - pulsar volumes: - ..:/risingwave rw-build-env: - image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909 + image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231022 volumes: - ..:/risingwave ci-flamegraph-env: - image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909 + image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231022 # NOTE(kwannoel): This is used in order to permit # syscalls for `nperf` (perf_event_open), # so it can do CPU profiling. 
@@ -107,7 +109,7 @@ services: - ..:/risingwave regress-test-env: - image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909 + image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231022 depends_on: db: condition: service_healthy @@ -182,3 +184,19 @@ services: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9093,PLAINTEXT_INTERNAL://localhost:29093 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + + pulsar: + container_name: pulsar + image: apachepulsar/pulsar:latest + command: bin/pulsar standalone + ports: + - "6650:6650" + - "6651:8080" + expose: + - "8080" + - "6650" + healthcheck: + test: [ "CMD-SHELL", "bin/pulsar-admin brokers healthcheck"] + interval: 5s + timeout: 5s + retries: 5 diff --git a/ci/rust-toolchain b/ci/rust-toolchain index ebc0b6c285a4e..fe2a026f6e40f 100644 --- a/ci/rust-toolchain +++ b/ci/rust-toolchain @@ -1,2 +1,2 @@ [toolchain] -channel = "nightly-2023-09-09" +channel = "nightly-2023-10-21" diff --git a/ci/scripts/backfill-test.sh b/ci/scripts/backfill-test.sh index 039e8bee94865..c0b95da958fed 100755 --- a/ci/scripts/backfill-test.sh +++ b/ci/scripts/backfill-test.sh @@ -30,13 +30,6 @@ git config --global --add safe.directory /risingwave download_and_prepare_rw "$profile" common -echo "--- e2e, ci-backfill, build" -cargo make ci-start ci-backfill - ################ TESTS -echo "--- e2e, ci-backfill, run backfill test" ./ci/scripts/run-backfill-tests.sh - -echo "--- Kill cluster" -cargo make kill diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 1fdf5ae149872..d42ef9d78516e 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -51,13 +51,12 @@ cargo build \ -p risingwave_regress_test \ -p risingwave_sqlsmith \ -p risingwave_compaction_test \ - -p risingwave_backup_cmd \ -p risingwave_e2e_extended_mode_test \ $RISINGWAVE_FEATURE_FLAGS \ --profile "$profile" -artifacts=(risingwave sqlsmith compaction-test backup-restore risingwave_regress_test 
risingwave_e2e_extended_mode_test risedev-dev delete-range-test) +artifacts=(risingwave sqlsmith compaction-test risingwave_regress_test risingwave_e2e_extended_mode_test risedev-dev delete-range-test) echo "--- Show link info" ldd target/"$profile"/risingwave diff --git a/ci/scripts/check.sh b/ci/scripts/check.sh index 728788227e8f6..2d194c40e2e99 100755 --- a/ci/scripts/check.sh +++ b/ci/scripts/check.sh @@ -42,7 +42,7 @@ sccache --show-stats sccache --zero-stats echo "--- Run doctest" -cargo test --doc +RUSTDOCFLAGS="-Clink-arg=-fuse-ld=lld" cargo test --doc echo "--- Show sccache stats" sccache --show-stats @@ -50,7 +50,4 @@ sccache --zero-stats echo "--- Run audit check" cargo audit \ - --ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2022-0093 - # https://github.com/risingwavelabs/risingwave/issues/11842 - # https://github.com/risingwavelabs/risingwave/issues/11986 - + --ignore RUSTSEC-2023-0052 # https://github.com/risingwavelabs/risingwave/issues/11842 diff --git a/ci/scripts/cron-fuzz-test.sh b/ci/scripts/cron-fuzz-test.sh index f12e3063a5a3b..c58f074decdf1 100755 --- a/ci/scripts/cron-fuzz-test.sh +++ b/ci/scripts/cron-fuzz-test.sh @@ -4,8 +4,13 @@ set -euo pipefail source ci/scripts/common.sh -export RUN_SQLSMITH=0 -export RUN_SQLSMITH_FRONTEND=1 + +# NOTE(kwannoel): Disabled because there's some breakage after #12485, +# see https://github.com/risingwavelabs/risingwave/issues/12577. +# Frontend is relatively stable, e2e fuzz test will cover the same cases also, +# so we can just disable it. 
+export RUN_SQLSMITH_FRONTEND=0 +export RUN_SQLSMITH=1 export SQLSMITH_COUNT=1000 export TEST_NUM=100 source ci/scripts/run-fuzz-test.sh diff --git a/ci/scripts/deterministic-recovery-test.sh b/ci/scripts/deterministic-recovery-test.sh index 0d3a7b3fabed4..c5f89a2bbc7e0 100755 --- a/ci/scripts/deterministic-recovery-test.sh +++ b/ci/scripts/deterministic-recovery-test.sh @@ -9,11 +9,20 @@ echo "--- Download artifacts" download-and-decompress-artifact risingwave_simulation . chmod +x ./risingwave_simulation -export RUST_LOG="info,risingwave_meta::barrier::recovery=debug" +export RUST_LOG="info,\ +risingwave_meta::barrier::recovery=debug,\ +risingwave_meta::manager::catalog=debug,\ +risingwave_meta::rpc::ddl_controller=debug,\ +risingwave_meta::barrier::mod=debug,\ +risingwave_simulation=debug" export LOGDIR=.risingwave/log mkdir -p $LOGDIR +# FIXME(kwannoel): Why is this failing? +# echo "--- deterministic simulation e2e, ci-3cn-2fe-3meta, recovery, background_ddl" +# seq $TEST_NUM | parallel MADSIM_TEST_SEED={} './risingwave_simulation --kill --kill-rate=${KILL_RATE} ./e2e_test/background_ddl/sim/basic.slt 2> $LOGDIR/recovery-ddl-{}.log && rm $LOGDIR/recovery-ddl-{}.log' + echo "--- deterministic simulation e2e, ci-3cn-2fe-3meta, recovery, ddl" seq $TEST_NUM | parallel MADSIM_TEST_SEED={} './risingwave_simulation --kill --kill-rate=${KILL_RATE} ./e2e_test/ddl/\*\*/\*.slt 2> $LOGDIR/recovery-ddl-{}.log && rm $LOGDIR/recovery-ddl-{}.log' diff --git a/ci/scripts/docker.sh b/ci/scripts/docker.sh index d84cbc39016dc..f7936f9987d70 100755 --- a/ci/scripts/docker.sh +++ b/ci/scripts/docker.sh @@ -7,9 +7,26 @@ ghcraddr="ghcr.io/risingwavelabs/risingwave" dockerhubaddr="risingwavelabs/risingwave" arch="$(uname -m)" +echo "--- ghcr login" +echo "$GHCR_TOKEN" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin + +echo "--- dockerhub login" +echo "$DOCKER_TOKEN" | docker login -u "risingwavelabs" --password-stdin + # Build RisingWave docker image 
${BUILDKITE_COMMIT}-${arch} echo "--- docker build and tag" -docker build -f docker/Dockerfile --build-arg "GIT_SHA=${BUILDKITE_COMMIT}" -t "${ghcraddr}:${BUILDKITE_COMMIT}-${arch}" --target risingwave . +docker buildx create \ + --name container \ + --driver=docker-container + +docker buildx build -f docker/Dockerfile \ + --build-arg "GIT_SHA=${BUILDKITE_COMMIT}" -t "${ghcraddr}:${BUILDKITE_COMMIT}-${arch}" \ + --progress plain \ + --builder=container \ + --load \ + --cache-to "type=registry,ref=ghcr.io/risingwavelabs/risingwave-build-cache:${arch}" \ + --cache-from "type=registry,ref=ghcr.io/risingwavelabs/risingwave-build-cache:${arch}" \ + . echo "--- check the image can start correctly" container_id=$(docker run -d "${ghcraddr}:${BUILDKITE_COMMIT}-${arch}" playground) @@ -25,12 +42,6 @@ fi echo "--- docker images" docker images -echo "--- ghcr login" -echo "$GHCR_TOKEN" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin - -echo "--- dockerhub login" -echo "$DOCKER_TOKEN" | docker login -u "risingwavelabs" --password-stdin - echo "--- docker push to ghcr" docker push "${ghcraddr}:${BUILDKITE_COMMIT}-${arch}" diff --git a/ci/scripts/e2e-iceberg-cdc.sh b/ci/scripts/e2e-iceberg-cdc.sh new file mode 100755 index 0000000000000..081f5bbd2afcb --- /dev/null +++ b/ci/scripts/e2e-iceberg-cdc.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +# Exits as soon as any line fails. +set -euo pipefail + +source ci/scripts/common.sh + +# prepare environment +export CONNECTOR_RPC_ENDPOINT="localhost:50051" +export CONNECTOR_LIBS_PATH="./connector-node/libs" + +while getopts 'p:' opt; do + case ${opt} in + p ) + profile=$OPTARG + ;; + \? 
) + echo "Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + ;; + esac +done +shift $((OPTIND -1)) + +download_and_prepare_rw "$profile" source + +echo "--- Download connector node package" +buildkite-agent artifact download risingwave-connector.tar.gz ./ +mkdir ./connector-node +tar xf ./risingwave-connector.tar.gz -C ./connector-node + +echo "--- e2e, ci-1cn-1fe, iceberg cdc" + +node_port=50051 +node_timeout=10 + +wait_for_connector_node_start() { + start_time=$(date +%s) + while : + do + if nc -z localhost $node_port; then + echo "Port $node_port is listened! Connector Node is up!" + break + fi + + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + if [ $elapsed_time -ge $node_timeout ]; then + echo "Timeout waiting for port $node_port to be listened!" + exit 1 + fi + sleep 0.1 + done + sleep 2 +} + +echo "--- starting risingwave cluster with connector node" + +RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \ +cargo make ci-start ci-1cn-1fe-with-recovery +./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-node.log 2>&1 & +echo "waiting for connector node to start" +wait_for_connector_node_start + +# prepare minio iceberg sink +echo "--- preparing iceberg" +.risingwave/bin/mcli -C .risingwave/config/mcli mb hummock-minio/icebergdata + +cd e2e_test/iceberg +bash ./start_spark_connect_server.sh + +# Don't remove the `--quiet` option since poetry has a bug when printing output, see +# https://github.com/python-poetry/poetry/issues/3412 +"$HOME"/.local/bin/poetry update --quiet + +# 1. import data to mysql +mysql --host=mysql --port=3306 -u root -p123456 < ./test_case/cdc/mysql_cdc.sql + +# 2. create table and sink +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/cdc/no_partition_cdc_init.toml + +# 3. 
insert new data to mysql +mysql --host=mysql --port=3306 -u root -p123456 < ./test_case/cdc/mysql_cdc_insert.sql + +sleep 20 + +# 4. check change +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/cdc/no_partition_cdc.toml \ No newline at end of file diff --git a/ci/scripts/e2e-iceberg-sink-test.sh b/ci/scripts/e2e-iceberg-sink-test.sh index 41f1ee2a80f26..1a12225ab5435 100755 --- a/ci/scripts/e2e-iceberg-sink-test.sh +++ b/ci/scripts/e2e-iceberg-sink-test.sh @@ -5,6 +5,9 @@ set -euo pipefail source ci/scripts/common.sh +# prepare environment +export CONNECTOR_LIBS_PATH="./connector-node/libs" + while getopts 'p:' opt; do case ${opt} in p ) diff --git a/ci/scripts/e2e-iceberg-sink-v2-test.sh b/ci/scripts/e2e-iceberg-sink-v2-test.sh index 83c0d187d6b3b..0e8054a4946af 100755 --- a/ci/scripts/e2e-iceberg-sink-v2-test.sh +++ b/ci/scripts/e2e-iceberg-sink-v2-test.sh @@ -38,7 +38,10 @@ bash ./start_spark_connect_server.sh # Don't remove the `--quiet` option since poetry has a bug when printing output, see # https://github.com/python-poetry/poetry/issues/3412 "$HOME"/.local/bin/poetry update --quiet -"$HOME"/.local/bin/poetry run python main.py +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/no_partition_append_only.toml +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/no_partition_upsert.toml +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/partition_append_only.toml +"$HOME"/.local/bin/poetry run python main.py -t ./test_case/partition_upsert.toml echo "--- Kill cluster" diff --git a/ci/scripts/e2e-kafka-sink-test.sh b/ci/scripts/e2e-kafka-sink-test.sh index 7f03945fe5b6e..71a91f2d8fba9 100755 --- a/ci/scripts/e2e-kafka-sink-test.sh +++ b/ci/scripts/e2e-kafka-sink-test.sh @@ -3,9 +3,10 @@ # Exits as soon as any line fails. 
set -euo pipefail -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-append-only --create > /dev/null 2>&1 -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-upsert --create > /dev/null 2>&1 -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-debezium --create > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only --create > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert --create > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert-schema --create > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-debezium --create > /dev/null 2>&1 sqllogictest -p 4566 -d dev 'e2e_test/sink/kafka/create_sink.slt' sleep 2 @@ -13,7 +14,7 @@ sleep 2 # test append-only kafka sink echo "testing append-only kafka sink" diff ./e2e_test/sink/kafka/append_only1.result \ -<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-append-only --from-beginning --max-messages 10 | sort) 2> /dev/null) +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only --from-beginning --max-messages 10 | sort) 2> /dev/null) if [ $? -ne 0 ]; then echo "The output for append-only sink is not as expected." 
exit 1 @@ -22,15 +23,24 @@ fi # test upsert kafka sink echo "testing upsert kafka sink" diff ./e2e_test/sink/kafka/upsert1.result \ -<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 10 | sort) 2> /dev/null) +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 10 | sort) 2> /dev/null) if [ $? -ne 0 ]; then echo "The output for upsert sink is not as expected." exit 1 fi +# test upsert kafka sink with schema +echo "testing upsert kafka sink with schema" +diff ./e2e_test/sink/kafka/upsert_schema1.result \ +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert-schema --from-beginning --property print.key=true --max-messages 10 | sort) 2> /dev/null) +if [ $? -ne 0 ]; then + echo "The output for upsert sink with schema is not as expected." + exit 1 +fi + # test debezium kafka sink echo "testing debezium kafka sink" -(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 10 | sort) > ./e2e_test/sink/kafka/debezium1.tmp.result 2> /dev/null +(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 10 | sort) > ./e2e_test/sink/kafka/debezium1.tmp.result 2> /dev/null python3 e2e_test/sink/kafka/debezium.py e2e_test/sink/kafka/debezium1.result e2e_test/sink/kafka/debezium1.tmp.result if [ $? -ne 0 ]; then echo "The output for debezium sink is not as expected." 
@@ -47,7 +57,7 @@ psql -h localhost -p 4566 -d dev -U root -c "update t_kafka set v_varchar = '', # test append-only kafka sink after update echo "testing append-only kafka sink after updating data" diff ./e2e_test/sink/kafka/append_only2.result \ -<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-append-only --from-beginning --max-messages 11 | sort) 2> /dev/null) +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only --from-beginning --max-messages 11 | sort) 2> /dev/null) if [ $? -ne 0 ]; then echo "The output for append-only sink after update is not as expected." exit 1 @@ -56,15 +66,24 @@ fi # test upsert kafka sink after update echo "testing upsert kafka sink after updating data" diff ./e2e_test/sink/kafka/upsert2.result \ -<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 11 | sort) 2> /dev/null) +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 11 | sort) 2> /dev/null) if [ $? -ne 0 ]; then echo "The output for upsert sink after update is not as expected." exit 1 fi +# test upsert kafka sink with schema after update +echo "testing upsert kafka sink with schema after updating data" +diff ./e2e_test/sink/kafka/upsert_schema2.result \ +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert-schema --from-beginning --property print.key=true --max-messages 11 | sort) 2> /dev/null) +if [ $? -ne 0 ]; then + echo "The output for upsert sink with schema is not as expected." 
+ exit 1 +fi + # test debezium kafka sink after update echo "testing debezium kafka sink after updating data" -(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 11 | sort) > ./e2e_test/sink/kafka/debezium2.tmp.result 2> /dev/null +(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 11 | sort) > ./e2e_test/sink/kafka/debezium2.tmp.result 2> /dev/null python3 e2e_test/sink/kafka/debezium.py e2e_test/sink/kafka/debezium2.result e2e_test/sink/kafka/debezium2.tmp.result if [ $? -ne 0 ]; then echo "The output for debezium sink after update is not as expected." @@ -81,15 +100,24 @@ psql -h localhost -p 4566 -d dev -U root -c "delete from t_kafka where id = 1;" # test upsert kafka sink after delete echo "testing upsert kafka sink after deleting data" diff ./e2e_test/sink/kafka/upsert3.result \ -<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 12 | sort) 2> /dev/null) +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert --from-beginning --property print.key=true --max-messages 12 | sort) 2> /dev/null) if [ $? -ne 0 ]; then echo "The output for upsert sink after update is not as expected." exit 1 fi +# test upsert kafka sink with schema after delete +echo "testing upsert kafka sink with schema after deleting data" +diff ./e2e_test/sink/kafka/upsert_schema3.result \ +<((./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert-schema --from-beginning --property print.key=true --max-messages 12 | sort) 2> /dev/null) +if [ $? 
-ne 0 ]; then + echo "The output for upsert sink with schema is not as expected." + exit 1 +fi + # test debezium kafka sink after delete echo "testing debezium kafka sink after deleting data" -(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 13 | sort) > ./e2e_test/sink/kafka/debezium3.tmp.result 2> /dev/null +(./.risingwave/bin/kafka/bin/kafka-console-consumer.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-debezium --property print.key=true --from-beginning --max-messages 13 | sort) > ./e2e_test/sink/kafka/debezium3.tmp.result 2> /dev/null python3 e2e_test/sink/kafka/debezium.py e2e_test/sink/kafka/debezium3.result e2e_test/sink/kafka/debezium3.tmp.result if [ $? -ne 0 ]; then echo "The output for debezium sink after delete is not as expected." @@ -100,6 +128,13 @@ else fi sqllogictest -p 4566 -d dev 'e2e_test/sink/kafka/drop_sink.slt' -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-append-only --delete > /dev/null 2>&1 -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-upsert --delete > /dev/null 2>&1 -./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:29092 --topic test-rw-sink-debezium --delete > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only --delete > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-upsert --delete > /dev/null 2>&1 +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-debezium --delete > /dev/null 2>&1 + +# test different encoding +echo "testing protobuf" +cp src/connector/src/test_data/proto_recursive/recursive.pb ./proto-recursive 
+./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only-protobuf --create > /dev/null 2>&1 +sqllogictest -p 4566 -d dev 'e2e_test/sink/kafka/protobuf.slt' +./.risingwave/bin/kafka/bin/kafka-topics.sh --bootstrap-server message_queue:29092 --topic test-rw-sink-append-only-protobuf --delete > /dev/null 2>&1 diff --git a/ci/scripts/e2e-pulsar-sink-test.sh b/ci/scripts/e2e-pulsar-sink-test.sh new file mode 100755 index 0000000000000..a2a0edb550f33 --- /dev/null +++ b/ci/scripts/e2e-pulsar-sink-test.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +source ci/scripts/common.sh + +while getopts 'p:' opt; do + case ${opt} in + p ) + profile=$OPTARG + ;; + \? ) + echo "Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + ;; + esac +done +shift $((OPTIND -1)) + +download_and_prepare_rw "$profile" source + +echo "--- starting risingwave cluster" +cargo make ci-start ci-pulsar-test +sleep 1 + +echo "--- waiting until pulsar is healthy" +HTTP_CODE=404 +MAX_RETRY=20 +while [[ $HTTP_CODE -ne 200 && MAX_RETRY -gt 0 ]] +do + HTTP_CODE=$(curl --connect-timeout 2 -s -o /dev/null -w ''%{http_code}'' http://pulsar:8080/admin/v2/clusters) + ((MAX_RETRY--)) + sleep 5 +done + +# Exits as soon as any line fails. 
+set -euo pipefail + +echo "--- testing pulsar sink" +sqllogictest -p 4566 -d dev './e2e_test/sink/pulsar_sink.slt' + +sleep 1 + +echo "--- Kill cluster" +cargo make ci-kill \ No newline at end of file diff --git a/ci/scripts/e2e-sink-test.sh b/ci/scripts/e2e-sink-test.sh index e1b700ee0d173..ce2cc46381eba 100755 --- a/ci/scripts/e2e-sink-test.sh +++ b/ci/scripts/e2e-sink-test.sh @@ -5,6 +5,9 @@ set -euo pipefail source ci/scripts/common.sh +# prepare environment +export CONNECTOR_LIBS_PATH="./connector-node/libs" + while getopts 'p:' opt; do case ${opt} in p ) @@ -54,7 +57,7 @@ node_port=50051 node_timeout=10 echo "--- starting risingwave cluster with connector node" -cargo make ci-start ci-kafka +cargo make ci-start ci-1cn-1fe ./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-node.log 2>&1 & echo "waiting for connector node to start" diff --git a/ci/scripts/e2e-source-test.sh b/ci/scripts/e2e-source-test.sh index def1368641641..59618d24641aa 100755 --- a/ci/scripts/e2e-source-test.sh +++ b/ci/scripts/e2e-source-test.sh @@ -7,6 +7,7 @@ source ci/scripts/common.sh # prepare environment export CONNECTOR_RPC_ENDPOINT="localhost:50051" +export CONNECTOR_LIBS_PATH="./connector-node/libs" while getopts 'p:' opt; do case ${opt} in @@ -44,9 +45,9 @@ echo "--- e2e, ci-1cn-1fe, mysql & postgres cdc" mysql --host=mysql --port=3306 -u root -p123456 < ./e2e_test/source/cdc/mysql_cdc.sql # import data to postgres -export PGPASSWORD='postgres'; -createdb -h db -U postgres cdc_test -psql -h db -U postgres -d cdc_test < ./e2e_test/source/cdc/postgres_cdc.sql +export PGHOST=db PGUSER=postgres PGPASSWORD=postgres PGDATABASE=cdc_test +createdb +psql < ./e2e_test/source/cdc/postgres_cdc.sql node_port=50051 node_timeout=10 @@ -79,6 +80,9 @@ cargo make ci-start ci-1cn-1fe-with-recovery echo "waiting for connector node to start" wait_for_connector_node_start +echo "--- inline cdc test" +sqllogictest -p 4566 -d dev './e2e_test/source/cdc_inline/**/*.slt' + echo 
"--- mysql & postgres cdc validate test" sqllogictest -p 4566 -d dev './e2e_test/source/cdc/cdc.validate.mysql.slt' sqllogictest -p 4566 -d dev './e2e_test/source/cdc/cdc.validate.postgres.slt' @@ -96,7 +100,7 @@ echo "cluster killed " # insert new rows mysql --host=mysql --port=3306 -u root -p123456 < ./e2e_test/source/cdc/mysql_cdc_insert.sql -psql -h db -U postgres -d cdc_test < ./e2e_test/source/cdc/postgres_cdc_insert.sql +psql < ./e2e_test/source/cdc/postgres_cdc_insert.sql echo "inserted new rows into mysql and postgres" # start cluster w/o clean-data @@ -141,6 +145,10 @@ chmod +x ./scripts/source/prepare_data_after_alter.sh ./scripts/source/prepare_data_after_alter.sh 2 sqllogictest -p 4566 -d dev './e2e_test/source/basic/alter/kafka_after_new_data.slt' +echo "--- e2e, kafka alter source again" +./scripts/source/prepare_data_after_alter.sh 3 +sqllogictest -p 4566 -d dev './e2e_test/source/basic/alter/kafka_after_new_data_2.slt' + echo "--- Run CH-benCHmark" ./risedev slt -p 4566 -d dev './e2e_test/ch_benchmark/batch/ch_benchmark.slt' ./risedev slt -p 4566 -d dev './e2e_test/ch_benchmark/streaming/*.slt' diff --git a/ci/scripts/multi-arch-docker.sh b/ci/scripts/multi-arch-docker.sh index f2e83336fead7..e71e1e6f231e6 100755 --- a/ci/scripts/multi-arch-docker.sh +++ b/ci/scripts/multi-arch-docker.sh @@ -14,13 +14,27 @@ date="$(date +%Y%m%d)" ghcraddr="ghcr.io/risingwavelabs/risingwave" dockerhubaddr="risingwavelabs/risingwave" + +arches=() + +if [ "${SKIP_TARGET_AMD64:-false}" != "true" ]; then + arches+=("x86_64") +fi + +if [ "${SKIP_TARGET_AARCH64:-false}" != "true" ]; then + arches+=("aarch64") +fi + # push images to gchr function pushGchr() { GHCRTAG="${ghcraddr}:$1" echo "push to gchr, image tag: ${GHCRTAG}" - docker manifest create --insecure "$GHCRTAG" \ - --amend "${ghcraddr}:${BUILDKITE_COMMIT}-x86_64" \ - --amend "${ghcraddr}:${BUILDKITE_COMMIT}-aarch64" + args=() + for arch in "${arches[@]}" + do + args+=( --amend 
"${ghcraddr}:${BUILDKITE_COMMIT}-${arch}" ) + done + docker manifest create --insecure "$GHCRTAG" "${args[@]}" docker manifest push --insecure "$GHCRTAG" } @@ -28,9 +42,12 @@ function pushGchr() { function pushDockerhub() { DOCKERTAG="${dockerhubaddr}:$1" echo "push to dockerhub, image tag: ${DOCKERTAG}" - docker manifest create --insecure "$DOCKERTAG" \ - --amend "${dockerhubaddr}:${BUILDKITE_COMMIT}-x86_64" \ - --amend "${dockerhubaddr}:${BUILDKITE_COMMIT}-aarch64" + args=() + for arch in "${arches[@]}" + do + args+=( --amend "${dockerhubaddr}:${BUILDKITE_COMMIT}-${arch}" ) + done + docker manifest create --insecure "$DOCKERTAG" "${args[@]}" docker manifest push --insecure "$DOCKERTAG" } @@ -74,7 +91,10 @@ if [[ -n "${BUILDKITE_TAG}" ]]; then fi echo "--- delete the manifest images from dockerhub" +args=() +for arch in "${arches[@]}" +do + args+=( "${dockerhubaddr}:${BUILDKITE_COMMIT}-${arch}" ) +done docker run --rm lumir/remove-dockerhub-tag \ - --user "risingwavelabs" --password "$DOCKER_TOKEN" \ - "${dockerhubaddr}:${BUILDKITE_COMMIT}-x86_64" \ - "${dockerhubaddr}:${BUILDKITE_COMMIT}-aarch64" \ No newline at end of file + --user "risingwavelabs" --password "$DOCKER_TOKEN" "${args[@]}" \ No newline at end of file diff --git a/ci/scripts/pr-fuzz-test.sh b/ci/scripts/pr-fuzz-test.sh index bbf8471864e9e..66923c4fb8a71 100755 --- a/ci/scripts/pr-fuzz-test.sh +++ b/ci/scripts/pr-fuzz-test.sh @@ -5,35 +5,15 @@ set -euo pipefail source ci/scripts/common.sh -set +e -# Set features, depending on our workflow -# If sqlsmith files are modified, we run tests with sqlsmith enabled. 
-MATCHES="ci/scripts/cron-fuzz-test.sh\ -\|ci/scripts/pr-fuzz-test.sh\ -\|ci/scripts/run-fuzz-test.sh\ -\|src/tests/sqlsmith" -NOT_MATCHES="\.md" -CHANGED=$(git diff --name-only origin/main | grep -v "$NOT_MATCHES" | grep "$MATCHES") -set -e -# Always run sqlsmith frontend tests -export RUN_SQLSMITH_FRONTEND=1 +# NOTE(kwannoel): Disabled because there's some breakage after #12485, +# see https://github.com/risingwavelabs/risingwave/issues/12577. +# Frontend is relatively stable, e2e fuzz test will cover the same cases also, +# so we can just disable it. +export RUN_SQLSMITH_FRONTEND=0 export RUN_SQLSMITH=1 export SQLSMITH_COUNT=100 - -# Run e2e tests if changes to sqlsmith source files detected. -# NOTE(kwannoel): Keep this here in-case we ever want to revert. -#if [[ -n "$CHANGED" ]]; then -# echo "--- Checking whether to run all sqlsmith tests" -# echo "origin/main SHA: $(git rev-parse origin/main)" -# echo "Changes to Sqlsmith source files detected:" -# echo "$CHANGED" -# export RUN_SQLSMITH=1 -# export SQLSMITH_COUNT=100 -# export TEST_NUM=32 -# echo "Enabled Sqlsmith tests." -#else -# export RUN_SQLSMITH=0 -#fi +export TEST_NUM=32 +echo "Enabled Sqlsmith tests." 
source ci/scripts/run-fuzz-test.sh diff --git a/ci/scripts/regress-test.sh b/ci/scripts/regress-test.sh index e32eb2c9ad666..aa5912e591df8 100755 --- a/ci/scripts/regress-test.sh +++ b/ci/scripts/regress-test.sh @@ -30,7 +30,6 @@ mv target/debug/risingwave_regress_test-"$profile" target/debug/risingwave_regre chmod +x ./target/debug/risingwave_regress_test echo "--- Postgres regress test" -apt-get -y install locales locale-gen C export LANGUAGE=C export LANG=C diff --git a/ci/scripts/release.sh b/ci/scripts/release.sh index b222e49c08261..08e5794f173cd 100755 --- a/ci/scripts/release.sh +++ b/ci/scripts/release.sh @@ -11,7 +11,7 @@ if [ "${BUILDKITE_SOURCE}" != "schedule" ] && [ "${BUILDKITE_SOURCE}" != "webhoo fi echo "--- Install java and maven" -yum install -y java-11-openjdk wget python3 cyrus-sasl-devel +yum install -y java-11-openjdk java-11-openjdk-devel wget python3 cyrus-sasl-devel pip3 install toml-cli wget https://ci-deps-dist.s3.amazonaws.com/apache-maven-3.9.3-bin.tar.gz && tar -zxvf apache-maven-3.9.3-bin.tar.gz export PATH="${REPO_ROOT}/apache-maven-3.9.3/bin:$PATH" @@ -64,6 +64,10 @@ elif [[ -n "${BINARY_NAME+x}" ]]; then aws s3 cp risingwave-${BINARY_NAME}-x86_64-unknown-linux.tar.gz s3://risingwave-nightly-pre-built-binary fi +echo "--- Build connector node" +cd ${REPO_ROOT}/java && mvn -B package -Dmaven.test.skip=true -Dno-build-rust +cd ${REPO_ROOT} && mv ${REPO_ROOT}/java/connector-node/assembly/target/risingwave-connector-1.0.0.tar.gz risingwave-connector-"${BUILDKITE_TAG}".tar.gz + if [[ -n "${BUILDKITE_TAG}" ]]; then echo "--- Install gh cli" yum install -y dnf @@ -78,13 +82,15 @@ if [[ -n "${BUILDKITE_TAG}" ]]; then tar -czvf risingwave-"${BUILDKITE_TAG}"-x86_64-unknown-linux.tar.gz risingwave gh release upload "${BUILDKITE_TAG}" risingwave-"${BUILDKITE_TAG}"-x86_64-unknown-linux.tar.gz + echo "--- Release upload risingwave debug info" + tar -czvf risingwave-"${BUILDKITE_TAG}"-x86_64-unknown-linux.dwp.tar.gz risingwave.dwp + gh release 
upload "${BUILDKITE_TAG}" risingwave-"${BUILDKITE_TAG}"-x86_64-unknown-linux.dwp.tar.gz + echo "--- Release upload risectl asset" tar -czvf risectl-"${BUILDKITE_TAG}"-x86_64-unknown-linux.tar.gz risectl gh release upload "${BUILDKITE_TAG}" risectl-"${BUILDKITE_TAG}"-x86_64-unknown-linux.tar.gz echo "--- Release build and upload risingwave connector node jar asset" - cd ${REPO_ROOT}/java && mvn -B package -Dmaven.test.skip=true -Djava.binding.release=true - cd connector-node/assembly/target && mv risingwave-connector-1.0.0.tar.gz risingwave-connector-"${BUILDKITE_TAG}".tar.gz gh release upload "${BUILDKITE_TAG}" risingwave-connector-"${BUILDKITE_TAG}".tar.gz fi diff --git a/ci/scripts/run-backfill-tests.sh b/ci/scripts/run-backfill-tests.sh index d28e28a430536..d0d5eafb3c917 100755 --- a/ci/scripts/run-backfill-tests.sh +++ b/ci/scripts/run-backfill-tests.sh @@ -1,23 +1,31 @@ #!/usr/bin/env bash # Runs backfill tests. -# NOTE(kwannoel): -# The following scenario is adapted in madsim's integration tests as well. -# But this script reproduces it more reliably (I'm not sure why.) -# Hence keeping it in case we ever need to debug backfill again. # USAGE: -# Start a rw cluster then run this script. 
# ```sh -# ./risedev d +# cargo make ci-start ci-backfill # ./ci/scripts/run-backfill-tests.sh # ``` +# Example progress: +# dev=> select * from rw_catalog.rw_ddl_progress; +# ddl_id | ddl_statement | progress | initialized_at +#--------+------------------------------------------------+----------+------------------------------- +# 1002 | CREATE MATERIALIZED VIEW m1 AS SELECT * FROM t | 56.12% | 2023-09-27 06:37:06.636+00:00 +#(1 row) set -euo pipefail PARENT_PATH=$(dirname "${BASH_SOURCE[0]}") +TEST_DIR=$PWD/e2e_test +BACKGROUND_DDL_DIR=$TEST_DIR/background_ddl +COMMON_DIR=$BACKGROUND_DDL_DIR/common + +CLUSTER_PROFILE='ci-1cn-1fe-with-recovery' +export RUST_LOG="risingwave_meta=debug" + run_sql_file() { psql -h localhost -p 4566 -d dev -U root -f "$@" } @@ -30,26 +38,405 @@ flush() { run_sql "FLUSH;" } -run_sql_file "$PARENT_PATH"/sql/backfill/create_base_table.sql -run_sql_file "$PARENT_PATH"/sql/backfill/insert_seed.sql +cancel_stream_jobs() { + ID=$(run_sql "select ddl_id from rw_catalog.rw_ddl_progress;" | tail -3 | head -1 | grep -E -o "[0-9]*") + echo "CANCELLING STREAM_JOB: $ID" + run_sql "CANCEL JOBS $ID;" .risingwave/log/compute-node.log 2>&1 & +} + +# Test snapshot and upstream read. +test_snapshot_and_upstream_read() { + echo "--- e2e, ci-backfill, test_snapshot_and_upstream_read" + cargo make ci-start ci-backfill + + run_sql_file "$PARENT_PATH"/sql/backfill/create_base_table.sql + + # Provide snapshot + run_sql_file "$PARENT_PATH"/sql/backfill/insert.sql + + # Provide updates ... + run_sql_file "$PARENT_PATH"/sql/backfill/insert.sql & + + # ... and concurrently create mv. 
+ run_sql_file "$PARENT_PATH"/sql/backfill/create_mv.sql & + + wait + + run_sql_file "$PARENT_PATH"/sql/backfill/select.sql deletes.log 2>&1 & + + ./risedev psql -c "CREATE MATERIALIZED VIEW m1 as select * from tomb;" + echo "--- Kill cluster" + kill_cluster + wait +} + +test_backfill_restart_cn_recovery() { + echo "--- e2e, $CLUSTER_PROFILE, test_background_restart_cn_recovery" + cargo make ci-start $CLUSTER_PROFILE + + # Test before recovery + sqllogictest -d dev -h localhost -p 4566 "$COMMON_DIR/create_table.slt" + sqllogictest -d dev -h localhost -p 4566 "$COMMON_DIR/create_bg_mv.slt" + sleep 1 + OLD_PROGRESS=$(run_sql "SHOW JOBS;" | grep -E -o "[0-9]{1,2}\.[0-9]{1,2}") -# Provide snapshot -for i in $(seq 1 12) -do - run_sql_file "$PARENT_PATH"/sql/backfill/insert_recurse.sql - flush -done + # Restart 1 CN + restart_cn -run_sql_file "$PARENT_PATH"/sql/backfill/create_mv.sql & + # Give some time to recover. + sleep 3 -# Provide upstream updates -for i in $(seq 1 5) -do - run_sql_file "$PARENT_PATH"/sql/backfill/insert_recurse.sql & -done + # Test after recovery + sqllogictest -d dev -h localhost -p 4566 "$COMMON_DIR/validate_one_job.slt" -wait + # Recover the mview progress + sleep 5 -run_sql_file "$PARENT_PATH"/sql/backfill/select.sql microbench_instance_type.txt + buildkite-agent artifact upload ./microbench_instance_type.txt + if [[ $instance_type != "m6i.4xlarge" ]]; then + echo "Only m6i.4xlarge is supported, skipping microbenchmark" + exit 0 + fi + # We need cargo criterion to generate machine-readable benchmark results from # microbench. 
echo "--- Installing cargo criterion" diff --git a/ci/scripts/s3-source-test.sh b/ci/scripts/s3-source-test.sh index 710ba63b6fd60..9fce76f000e31 100755 --- a/ci/scripts/s3-source-test.sh +++ b/ci/scripts/s3-source-test.sh @@ -30,7 +30,7 @@ cargo make ci-start ci-1cn-1fe echo "--- Run test" python3 -m pip install minio psycopg2-binary -python3 e2e_test/s3/$script.py +python3 e2e_test/s3/$script echo "--- Kill cluster" cargo make ci-kill diff --git a/ci/scripts/sql/backfill/insert.sql b/ci/scripts/sql/backfill/insert.sql new file mode 100644 index 0000000000000..f25f8b09cb2b8 --- /dev/null +++ b/ci/scripts/sql/backfill/insert.sql @@ -0,0 +1,6 @@ +insert into t1 +SELECT + generate_series, + '{"orders": {"id": 1, "price": "2.30", "customer_id": 2}}'::jsonb +FROM generate_series(1, 50000); +FLUSH; \ No newline at end of file diff --git a/ci/scripts/sql/backfill/insert_recurse.sql b/ci/scripts/sql/backfill/insert_recurse.sql deleted file mode 100644 index ea1660b3d2624..0000000000000 --- a/ci/scripts/sql/backfill/insert_recurse.sql +++ /dev/null @@ -1 +0,0 @@ -insert into t1 select _id + 1, data from t1; diff --git a/ci/scripts/sql/backfill/insert_seed.sql b/ci/scripts/sql/backfill/insert_seed.sql deleted file mode 100644 index 0791bdb366177..0000000000000 --- a/ci/scripts/sql/backfill/insert_seed.sql +++ /dev/null @@ -1,2 +0,0 @@ -insert into t1 values (1, '{"orders": {"id": 1, "price": "2.30", "customer_id": 2}}'); -flush; \ No newline at end of file diff --git a/ci/scripts/standalone-utils.sh b/ci/scripts/standalone-utils.sh index 4461331c28bfb..438f413ebe4dc 100755 --- a/ci/scripts/standalone-utils.sh +++ b/ci/scripts/standalone-utils.sh @@ -6,7 +6,9 @@ export RW_PREFIX=$PWD/.risingwave export PREFIX_BIN=$RW_PREFIX/bin export PREFIX_LOG=$RW_PREFIX/log -start_standalone() { +# NOTE(kwannoel): Compared to start_standalone below, we omitted the compactor-opts, +# so it should not start. 
+start_standalone_without_compactor() { RUST_BACKTRACE=1 \ "$PREFIX_BIN"/risingwave/standalone \ --meta-opts=" \ @@ -24,7 +26,6 @@ start_standalone() { --listen-addr 127.0.0.1:5688 \ --prometheus-listener-addr 127.0.0.1:1222 \ --advertise-addr 127.0.0.1:5688 \ - --metrics-level info \ --async-stack-trace verbose \ --connector-rpc-endpoint 127.0.0.1:50051 \ --parallelism 4 \ @@ -36,10 +37,49 @@ start_standalone() { --advertise-addr 127.0.0.1:4566 \ --prometheus-listener-addr 127.0.0.1:2222 \ --health-check-listener-addr 127.0.0.1:6786 \ - --metrics-level info \ --meta-addr http://127.0.0.1:5690" >"$1" 2>&1 } +# You can fill up this section by consulting +# .risingwave/log/risedev.log, after calling ./risedev d full. +# It is expected that minio, etcd will be started after this is called. +start_standalone() { + RUST_BACKTRACE=1 \ + "$PREFIX_BIN"/risingwave/standalone \ + --meta-opts=" \ + --listen-addr 127.0.0.1:5690 \ + --advertise-addr 127.0.0.1:5690 \ + --dashboard-host 127.0.0.1:5691 \ + --prometheus-host 127.0.0.1:1250 \ + --connector-rpc-endpoint 127.0.0.1:50051 \ + --backend etcd \ + --etcd-endpoints 127.0.0.1:2388 \ + --state-store hummock+minio://hummockadmin:hummockadmin@127.0.0.1:9301/hummock001 \ + --data-directory hummock_001 \ + --dashboard-ui-path $RW_PREFIX/ui" \ + --compute-opts=" \ + --listen-addr 127.0.0.1:5688 \ + --prometheus-listener-addr 127.0.0.1:1222 \ + --advertise-addr 127.0.0.1:5688 \ + --async-stack-trace verbose \ + --connector-rpc-endpoint 127.0.0.1:50051 \ + --parallelism 4 \ + --total-memory-bytes 8589934592 \ + --role both \ + --meta-address http://127.0.0.1:5690" \ + --frontend-opts=" \ + --listen-addr 127.0.0.1:4566 \ + --advertise-addr 127.0.0.1:4566 \ + --prometheus-listener-addr 127.0.0.1:2222 \ + --health-check-listener-addr 127.0.0.1:6786 \ + --meta-addr http://127.0.0.1:5690" \ + --compactor-opts=" \ + --listen-addr 127.0.0.1:6660 \ + --prometheus-listener-addr 127.0.0.1:1260 \ + --advertise-addr 127.0.0.1:6660 \ + 
--meta-address http://127.0.0.1:5690" >"$1" 2>&1 +} + stop_standalone() { pkill standalone } diff --git a/ci/scripts/upload-micro-bench-results.sh b/ci/scripts/upload-micro-bench-results.sh index 2644ca936c5da..e72b69950bb7b 100755 --- a/ci/scripts/upload-micro-bench-results.sh +++ b/ci/scripts/upload-micro-bench-results.sh @@ -36,6 +36,19 @@ get_commit() { | sed 's/\"//g' } +get_machine() { + buildkite-agent artifact download microbench_instance_type.txt ./ + cat ./microbench_instance_type.txt +} + +echo "--- Checking microbench_instance_type" +INSTANCE_TYPE=$(get_machine) +echo "instance type: $INSTANCE_TYPE" +if [[ $INSTANCE_TYPE != "m6i.4xlarge" ]]; then + echo "Only m6i.4xlarge is supported, microbenchmark was skipped" + exit 0 +fi + setup BUILDKITE_BUILD_URL="https://buildkite.com/risingwavelabs/main-cron/builds/$BUILDKITE_BUILD_NUMBER" diff --git a/ci/workflows/docker.yml b/ci/workflows/docker.yml index 3fe1cb5db67e5..d97d99af7691d 100644 --- a/ci/workflows/docker.yml +++ b/ci/workflows/docker.yml @@ -6,6 +6,7 @@ auto-retry: &auto-retry steps: - label: "docker-build-push: amd64" + if: build.env("SKIP_TARGET_AMD64") != "true" command: "ci/scripts/docker.sh" key: "build-amd64" plugins: @@ -18,6 +19,7 @@ steps: retry: *auto-retry - label: "docker-build-push: aarch64" + if: build.env("SKIP_TARGET_AARCH64") != "true" command: "ci/scripts/docker.sh" key: "build-aarch64" plugins: @@ -53,10 +55,9 @@ steps: - docker-compose#v4.9.0: run: release-env config: ci/docker-compose.yml + mount-buildkite-agent: true + propagate-environment: true environment: - BINARY_NAME - - BUILDKITE_SOURCE - GITHUB_TOKEN - - BUILDKITE_COMMIT - - BUILDKITE_TAG retry: *auto-retry diff --git a/ci/workflows/integration-tests.yml b/ci/workflows/integration-tests.yml index 6c4851c0c669c..455f29b210ec1 100644 --- a/ci/workflows/integration-tests.yml +++ b/ci/workflows/integration-tests.yml @@ -29,7 +29,8 @@ steps: - "postgres-cdc" - "mysql-sink" - "postgres-sink" - - "iceberg-sink" + - 
"iceberg-cdc" + # - "iceberg-sink" - "debezium-mysql" format: - "json" @@ -75,8 +76,12 @@ steps: testcase: "postgres-sink" format: "protobuf" skip: true + # - with: + # testcase: "iceberg-sink" + # format: "protobuf" + # skip: true - with: - testcase: "iceberg-sink" + testcase: "iceberg-cdc" format: "protobuf" skip: true - with: diff --git a/ci/workflows/main-cron.yml b/ci/workflows/main-cron.yml index e1e95d63ff2f5..d8e78952c141f 100644 --- a/ci/workflows/main-cron.yml +++ b/ci/workflows/main-cron.yml @@ -13,7 +13,7 @@ steps: run: rw-build-env config: ci/docker-compose.yml mount-buildkite-agent: true - timeout_in_minutes: 20 + timeout_in_minutes: 25 retry: *auto-retry - label: "build other components" @@ -29,7 +29,7 @@ steps: mount-buildkite-agent: true environment: - GITHUB_TOKEN - timeout_in_minutes: 10 + timeout_in_minutes: 12 retry: *auto-retry - label: "build (deterministic simulation)" @@ -68,6 +68,40 @@ steps: timeout_in_minutes: 60 retry: *auto-retry + - label: "end-to-end test (parallel) (release)" + command: "ci/scripts/e2e-test-parallel.sh -p ci-release" + depends_on: + - "build" + - "docslt" + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-sqllogictest-token + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - test-collector#v1.0.0: + files: "*-junit.xml" + format: "junit" + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "end-to-end test (parallel, in-memory) (release)" + command: "ci/scripts/e2e-test-parallel-in-memory.sh -p ci-release" + depends_on: + - "build" + - "docslt" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 12 + retry: *auto-retry + - label: "end-to-end source test (release)" command: "ci/scripts/e2e-source-test.sh -p ci-release" depends_on: @@ -82,6 
+116,20 @@ steps: timeout_in_minutes: 15 retry: *auto-retry + - label: "end-to-end sink test (release)" + command: "ci/scripts/e2e-sink-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: sink-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 35 + retry: *auto-retry + - label: "fuzz test" command: "ci/scripts/cron-fuzz-test.sh -p ci-release" depends_on: @@ -111,7 +159,7 @@ steps: config: ci/docker-compose.yml environment: - CODECOV_TOKEN - timeout_in_minutes: 17 + timeout_in_minutes: 20 retry: *auto-retry - label: "unit test (deterministic simulation)" @@ -197,8 +245,52 @@ steps: timeout_in_minutes: 5 retry: *auto-retry + - label: "end-to-end iceberg sink test (release)" + command: "ci/scripts/e2e-iceberg-sink-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 5 + retry: *auto-retry + + - label: "end-to-end iceberg sink v2 test (release)" + command: "ci/scripts/e2e-iceberg-sink-v2-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "e2e java-binding test (release)" + command: "ci/scripts/java-binding-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + # Extra 2 minutes to account for docker-compose latency. 
+ # See: https://github.com/risingwavelabs/risingwave/issues/9423#issuecomment-1521222169 + timeout_in_minutes: 10 + retry: *auto-retry + - label: "S3 source check on AWS (json parser)" - command: "ci/scripts/s3-source-test.sh -p ci-release -s run" + command: "ci/scripts/s3-source-test.sh -p ci-release -s run.py" depends_on: build plugins: - seek-oss/aws-sm#v2.3.1: @@ -215,7 +307,7 @@ steps: retry: *auto-retry - label: "S3 source check on AWS (json parser)" - command: "ci/scripts/s3-source-test.sh -p ci-release -s json_file" + command: "ci/scripts/s3-source-test.sh -p ci-release -s json_file.py" depends_on: build plugins: - seek-oss/aws-sm#v2.3.1: @@ -232,7 +324,41 @@ steps: retry: *auto-retry - label: "S3 source check on AWS (csv parser)" - command: "ci/scripts/s3-source-test.sh -p ci-release -s run_csv" + command: "ci/scripts/s3-source-test.sh -p ci-release -s run_csv.py" + depends_on: build + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + S3_SOURCE_TEST_CONF: ci_s3_source_test_aws + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - S3_SOURCE_TEST_CONF + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 25 + retry: *auto-retry + + - label: "S3_v2 source check on AWS (json parser)" + command: "ci/scripts/s3-source-test.sh -p ci-release -s 'fs_source_v2.py json'" + depends_on: build + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + S3_SOURCE_TEST_CONF: ci_s3_source_test_aws + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - S3_SOURCE_TEST_CONF + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 25 + retry: *auto-retry + + - label: "S3_v2 source check on AWS (csv parser)" + command: "ci/scripts/s3-source-test.sh -p ci-release -s 'fs_source_v2.py csv_without_header'" depends_on: build plugins: - seek-oss/aws-sm#v2.3.1: @@ -352,7 +478,7 @@ steps: config: ci/docker-compose.yml mount-buildkite-agent: true - 
./ci/plugins/upload-failure-logs - timeout_in_minutes: 5 + timeout_in_minutes: 20 retry: *auto-retry - label: "e2e standalone binary test" @@ -367,5 +493,129 @@ steps: config: ci/docker-compose.yml mount-buildkite-agent: true - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 21 + timeout_in_minutes: 25 + retry: *auto-retry + + - label: "end-to-end test for opendal (parallel)" + command: "ci/scripts/e2e-test-parallel-for-opendal.sh -p ci-release" + depends_on: + - "build" + - "docslt" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 14 + retry: *auto-retry + + - label: "end-to-end clickhouse sink test" + command: "ci/scripts/e2e-clickhouse-sink-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: sink-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "end-to-end pulsar sink test" + command: "ci/scripts/e2e-pulsar-sink-test.sh -p ci-release" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: sink-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "connector node integration test Java {{matrix.java_version}}" + command: "ci/scripts/connector-node-integration-test.sh -p ci-release -v {{matrix.java_version}}" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + matrix: + setup: + java_version: + - "11" + - "17" + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "release" + command: "ci/scripts/release.sh" + if: build.tag != null + plugins: + - 
seek-oss/aws-sm#v2.3.1: + env: + GITHUB_TOKEN: github-token + - docker-compose#v4.9.0: + run: release-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - GITHUB_TOKEN + - BUILDKITE_TAG + - BUILDKITE_SOURCE + timeout_in_minutes: 60 + retry: *auto-retry + + - label: "release docker image: amd64" + command: "ci/scripts/docker.sh" + key: "build-amd64" + if: build.tag != null + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + GHCR_USERNAME: ghcr-username + GHCR_TOKEN: ghcr-token + DOCKER_TOKEN: docker-token + GITHUB_TOKEN: github-token + timeout_in_minutes: 60 + retry: *auto-retry + + - label: "docker-build-push: aarch64" + command: "ci/scripts/docker.sh" + key: "build-aarch64" + if: build.tag != null + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + GHCR_USERNAME: ghcr-username + GHCR_TOKEN: ghcr-token + DOCKER_TOKEN: docker-token + GITHUB_TOKEN: github-token + timeout_in_minutes: 60 + agents: + queue: "linux-arm64" + retry: *auto-retry + + - label: "multi arch image create push" + command: "ci/scripts/multi-arch-docker.sh" + if: build.tag != null + depends_on: + - "build-amd64" + - "build-aarch64" + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + GHCR_USERNAME: ghcr-username + GHCR_TOKEN: ghcr-token + DOCKER_TOKEN: docker-token + timeout_in_minutes: 10 retry: *auto-retry diff --git a/ci/workflows/main.yml b/ci/workflows/main.yml deleted file mode 100644 index f46061642340a..0000000000000 --- a/ci/workflows/main.yml +++ /dev/null @@ -1,429 +0,0 @@ -auto-retry: &auto-retry - automatic: - # Agent terminated because the AWS EC2 spot instance killed by AWS. 
- - signal_reason: agent_stop - limit: 3 - -steps: - - label: "build (dev mode)" - command: "ci/scripts/build.sh -p ci-dev" - key: "build-dev" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "build (release mode)" - command: "ci/scripts/build.sh -p ci-release" - key: "build-release" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - env: - - BUILDKITE_COMMIT - timeout_in_minutes: 20 - retry: *auto-retry - - - label: "build other components" - command: "ci/scripts/build-other.sh" - key: "build-other" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GITHUB_TOKEN: github-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - GITHUB_TOKEN - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "build (deterministic simulation)" - command: "ci/scripts/build-simulation.sh" - key: "build-simulation" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "docslt" - command: "ci/scripts/docslt.sh" - key: "docslt" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "end-to-end test (dev mode)" - command: "ci/scripts/e2e-test.sh -p ci-dev -m ci-3streaming-2serving-3fe" - depends_on: - - "build-dev" - - "build-other" - - "docslt" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-sqllogictest-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - test-collector#v1.0.0: - files: "*-junit.xml" - format: "junit" - - 
./ci/plugins/upload-failure-logs - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "end-to-end test (release mode)" - command: "ci/scripts/e2e-test.sh -p ci-release -m ci-3streaming-2serving-3fe" - depends_on: - - "build-release" - - "build-other" - - "docslt" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-sqllogictest-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - test-collector#v1.0.0: - files: "*-junit.xml" - format: "junit" - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "end-to-end test (parallel) (dev mode)" - command: "ci/scripts/e2e-test-parallel.sh -p ci-dev" - depends_on: - - "build-dev" - - "docslt" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-sqllogictest-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - test-collector#v1.0.0: - files: "*-junit.xml" - format: "junit" - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "end-to-end test (parallel) (release mode)" - command: "ci/scripts/e2e-test-parallel.sh -p ci-release" - depends_on: - - "build-release" - - "docslt" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-sqllogictest-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - test-collector#v1.0.0: - files: "*-junit.xml" - format: "junit" - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "end-to-end test (parallel, in-memory) (release mode)" - command: "ci/scripts/e2e-test-parallel-in-memory.sh -p ci-release" - depends_on: - - "build-release" - - "docslt" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - 
mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "end-to-end source test (release mode)" - command: "ci/scripts/e2e-source-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: source-test-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "unit test" - command: "ci/scripts/pr-unit-test.sh" - plugins: - - ./ci/plugins/swapfile - - seek-oss/aws-sm#v2.3.1: - env: - CODECOV_TOKEN: my-codecov-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - environment: - - CODECOV_TOKEN - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "unit test (deterministic simulation)" - command: "MADSIM_TEST_NUM=50 ci/scripts/deterministic-unit-test.sh" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "integration test (deterministic simulation) - scale" - command: "TEST_NUM=30 ci/scripts/deterministic-it-test.sh scale::" - depends_on: "build-simulation" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 40 - retry: *auto-retry - - - label: "integration test (deterministic simulation) - recovery" - command: "TEST_NUM=30 ci/scripts/deterministic-it-test.sh recovery::" - depends_on: "build-simulation" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 40 - retry: *auto-retry - - - label: "integration test (deterministic simulation) - others" - command: "TEST_NUM=10 ci/scripts/deterministic-it-test.sh backfill_tests:: storage:: sink::" - depends_on: "build-simulation" - plugins: - - 
docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 40 - retry: *auto-retry - - - label: "end-to-end test (deterministic simulation)" - command: "TEST_NUM=32 ci/scripts/deterministic-e2e-test.sh" - depends_on: "build-simulation" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GITHUB_TOKEN: github-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - GITHUB_TOKEN - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 30 - retry: *auto-retry - - - label: "recovery test (deterministic simulation)" - command: "TEST_NUM=16 KILL_RATE=0.5 ci/scripts/deterministic-recovery-test.sh" - depends_on: "build-simulation" - plugins: - # - seek-oss/aws-sm#v2.3.1: - # env: - # BUILDKITE_ANALYTICS_TOKEN: buildkite-build-analytics-deterministic-token - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - # - test-collector#v1.0.0: - # files: "*-junit.xml" - # format: "junit" - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 30 - retry: *auto-retry - - - label: "end-to-end sink test (release mode)" - command: "ci/scripts/e2e-sink-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: sink-test-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 35 - retry: *auto-retry - - - label: "connector node integration test Java {{matrix.java_version}}" - command: "ci/scripts/connector-node-integration-test.sh -p ci-release -v {{matrix.java_version}}" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - matrix: - setup: - java_version: - - "11" - - "17" - timeout_in_minutes: 10 - retry: 
*auto-retry - - - label: "end-to-end iceberg sink test (release mode)" - command: "ci/scripts/e2e-iceberg-sink-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 5 - retry: *auto-retry - - - label: "end-to-end iceberg sink v2 test (release mode)" - command: "ci/scripts/e2e-iceberg-sink-v2-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 5 - retry: *auto-retry - - - label: "end-to-end clickhouse sink test (release mode)" - command: "ci/scripts/e2e-clickhouse-sink-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: sink-test-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 5 - retry: *auto-retry - - - label: "e2e java-binding test (at release)" - command: "ci/scripts/java-binding-test.sh -p ci-release" - depends_on: - - "build-release" - - "build-other" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - # Extra 2 minutes to account for docker-compose latency. 
- # See: https://github.com/risingwavelabs/risingwave/issues/9423#issuecomment-1521222169 - timeout_in_minutes: 10 - retry: *auto-retry - - - label: "release" - command: "ci/scripts/release.sh" - if: build.tag != null - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GITHUB_TOKEN: github-token - - docker-compose#v4.9.0: - run: release-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - GITHUB_TOKEN - - BUILDKITE_TAG - - BUILDKITE_SOURCE - timeout_in_minutes: 60 - retry: *auto-retry - - - label: "release docker image: amd64" - command: "ci/scripts/docker.sh" - key: "build-amd64" - if: build.tag != null - env: - PUSH: true - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GHCR_USERNAME: ghcr-username - GHCR_TOKEN: ghcr-token - DOCKER_TOKEN: docker-token - GITHUB_TOKEN: github-token - timeout_in_minutes: 60 - retry: *auto-retry - - - label: "docker-build-push: aarch64" - command: "ci/scripts/docker.sh" - key: "build-aarch64" - if: build.tag != null - env: - PUSH: true - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GHCR_USERNAME: ghcr-username - GHCR_TOKEN: ghcr-token - DOCKER_TOKEN: docker-token - GITHUB_TOKEN: github-token - timeout_in_minutes: 60 - agents: - queue: "linux-arm64" - retry: *auto-retry - - - label: "multi arch image create push" - command: "ci/scripts/multi-arch-docker.sh" - if: build.tag != null - depends_on: - - "build-amd64" - - "build-aarch64" - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - GHCR_USERNAME: ghcr-username - GHCR_TOKEN: ghcr-token - DOCKER_TOKEN: docker-token - timeout_in_minutes: 10 - retry: *auto-retry diff --git a/ci/workflows/pull-request.yml b/ci/workflows/pull-request.yml index 0e4ded02791ec..3aaa09f0d7716 100644 --- a/ci/workflows/pull-request.yml +++ b/ci/workflows/pull-request.yml @@ -19,6 +19,7 @@ steps: - label: "build" command: "ci/scripts/build.sh -p ci-dev" key: "build" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-build") plugins: - 
docker-compose#v4.9.0: run: rw-build-env @@ -30,6 +31,7 @@ steps: - label: "build other components" command: "ci/scripts/build-other.sh" key: "build-other" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-build-other") plugins: - seek-oss/aws-sm#v2.3.1: env: @@ -40,12 +42,13 @@ steps: mount-buildkite-agent: true environment: - GITHUB_TOKEN - timeout_in_minutes: 10 + timeout_in_minutes: 14 retry: *auto-retry - label: "build (deterministic simulation)" command: "ci/scripts/build-simulation.sh" key: "build-simulation" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-build-simulation") plugins: - docker-compose#v4.9.0: run: rw-build-env @@ -57,6 +60,7 @@ steps: - label: "docslt" command: "ci/scripts/docslt.sh" key: "docslt" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-docslt") plugins: - docker-compose#v4.9.0: run: rw-build-env @@ -67,6 +71,7 @@ steps: - label: "end-to-end test" command: "ci/scripts/e2e-test.sh -p ci-dev -m ci-3streaming-2serving-3fe" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-e2e-test") depends_on: - "build" - "build-other" @@ -82,6 +87,7 @@ steps: - label: "end-to-end test (parallel)" command: "ci/scripts/e2e-test-parallel.sh -p ci-dev" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-e2e-parallel-tests") depends_on: - "build" - "docslt" @@ -95,6 +101,7 @@ steps: retry: *auto-retry - label: "end-to-end test for opendal (parallel)" + if: build.pull_request.labels includes "ci/run-opendal-tests" command: "ci/scripts/e2e-test-parallel-for-opendal.sh -p ci-dev" depends_on: - "build" @@ -109,6 +116,7 @@ steps: retry: *auto-retry - label: "end-to-end test (parallel, in-memory)" + if: build.pull_request.labels includes "ci/run-e2e-parallel-in-memory-tests" command: 
"ci/scripts/e2e-test-parallel-in-memory.sh -p ci-dev" depends_on: "build" plugins: @@ -122,6 +130,7 @@ steps: - label: "end-to-end source test" command: "ci/scripts/e2e-source-test.sh -p ci-dev" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-e2e-source-tests") depends_on: - "build" - "build-other" @@ -136,6 +145,7 @@ steps: - label: "end-to-end sink test" command: "ci/scripts/e2e-sink-test.sh -p ci-dev" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-e2e-sink-tests") depends_on: - "build" - "build-other" @@ -145,11 +155,12 @@ steps: config: ci/docker-compose.yml mount-buildkite-agent: true - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 18 + timeout_in_minutes: 20 cancel_on_build_failing: true retry: *auto-retry - label: "connector node integration test Java {{matrix.java_version}}" + if: build.pull_request.labels includes "ci/run-java-connector-node-integration-tests" command: "ci/scripts/connector-node-integration-test.sh -p ci-dev -v {{matrix.java_version}}" depends_on: - "build" @@ -169,6 +180,7 @@ steps: retry: *auto-retry - label: "end-to-end iceberg sink test" + if: build.pull_request.labels includes "ci/run-e2e-iceberg-sink-tests" command: "ci/scripts/e2e-iceberg-sink-test.sh -p ci-dev" depends_on: - "build" @@ -183,6 +195,7 @@ steps: retry: *auto-retry - label: "end-to-end iceberg sink v2 test" + if: build.pull_request.labels includes "ci/run-e2e-iceberg-sink-tests" command: "ci/scripts/e2e-iceberg-sink-v2-test.sh -p ci-dev" depends_on: - "build" @@ -196,7 +209,38 @@ steps: timeout_in_minutes: 10 retry: *auto-retry + - label: "end-to-end iceberg cdc test" + if: build.pull_request.labels includes "ci/run-e2e-iceberg-sink-tests" + command: "ci/scripts/e2e-iceberg-cdc.sh -p ci-dev" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: sink-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true 
+ - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + + - label: "end-to-end pulsar sink test" + if: build.pull_request.labels includes "ci/run-e2e-pulsar-sink-tests" + command: "ci/scripts/e2e-pulsar-sink-test.sh -p ci-dev" + depends_on: + - "build" + - "build-other" + plugins: + - docker-compose#v4.9.0: + run: sink-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 10 + retry: *auto-retry + - label: "end-to-end clickhouse sink test" + if: build.pull_request.labels includes "ci/run-e2e-clickhouse-sink-tests" command: "ci/scripts/e2e-clickhouse-sink-test.sh -p ci-dev" depends_on: - "build" @@ -211,6 +255,7 @@ steps: retry: *auto-retry - label: "e2e java-binding test" + if: build.pull_request.labels includes "ci/run-java-binding-tests" command: "ci/scripts/java-binding-test.sh -p ci-dev" depends_on: - "build" @@ -226,6 +271,7 @@ steps: - label: "regress test" command: "ci/scripts/regress-test.sh -p ci-dev" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-regress-test") depends_on: "build" plugins: - docker-compose#v4.9.0: @@ -241,6 +287,7 @@ steps: # This ensures our `main-cron` workflow will be stable. 
- label: "unit test" command: "ci/scripts/pr-unit-test.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-unit-test") plugins: - ./ci/plugins/swapfile - seek-oss/aws-sm#v2.3.1: @@ -251,11 +298,12 @@ steps: config: ci/docker-compose.yml environment: - CODECOV_TOKEN - timeout_in_minutes: 16 + timeout_in_minutes: 20 retry: *auto-retry - label: "check" command: "ci/scripts/check.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-check") plugins: - gencer/cache#v2.4.10: id: cache @@ -277,6 +325,7 @@ steps: - label: "unit test (deterministic simulation)" command: "ci/scripts/deterministic-unit-test.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-unit-test-deterministic-simulation") plugins: - docker-compose#v4.9.0: run: rw-build-env @@ -288,6 +337,7 @@ steps: - label: "integration test (deterministic simulation)" command: "TEST_NUM=5 ci/scripts/deterministic-it-test.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-integration-test-deterministic-simulation") depends_on: "build-simulation" plugins: - docker-compose#v4.9.0: @@ -299,6 +349,7 @@ steps: - label: "end-to-end test (deterministic simulation)" command: "TEST_NUM=16 ci/scripts/deterministic-e2e-test.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-e2e-test-deterministic-simulation") depends_on: "build-simulation" plugins: - seek-oss/aws-sm#v2.3.1: @@ -317,6 +368,7 @@ steps: - label: "recovery test (deterministic simulation)" command: "TEST_NUM=8 KILL_RATE=0.5 ci/scripts/deterministic-recovery-test.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-recovery-test-deterministic-simulation") depends_on: "build-simulation" plugins: # - seek-oss/aws-sm#v2.3.1: @@ -336,6 +388,7 @@ steps: - 
label: "misc check" command: "ci/scripts/misc-check.sh" + if: (!build.pull_request.labels includes "ci/skip-ci" || build.pull_request.labels includes "ci/run-misc-check") plugins: - docker-compose#v4.9.0: run: rw-build-env @@ -448,8 +501,78 @@ steps: - "build" plugins: - docker-compose#v4.9.0: - run: ci-flamegraph-env + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 30 + + - label: "e2e standalone binary test" + command: "ci/scripts/e2e-test.sh -p ci-dev -m standalone" + if: build.pull_request.labels includes "ci/run-e2e-standalone-tests" + depends_on: + - "build" + - "build-other" + - "docslt" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env config: ci/docker-compose.yml mount-buildkite-agent: true - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 30 + retry: *auto-retry + + # FIXME(kwannoel): Let the github PR labeller label it, if sqlsmith source files has changes. + - label: "fuzz test" + command: "ci/scripts/pr-fuzz-test.sh -p ci-dev" + if: build.pull_request.labels includes "ci/run-sqlsmith-fuzzing-tests" + depends_on: + - "build" + - "build-simulation" + plugins: + - ./ci/plugins/swapfile + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 15 + retry: *auto-retry + + - label: "enable ci/skip-ci only in draft PRs" + if: build.pull_request.labels includes "ci/skip-ci" && !build.pull_request.draft + commands: + - echo "ci/skip-ci is only usable for draft Pull Requests" + - exit 1 + + - label: "micro benchmark" + command: "ci/scripts/run-micro-benchmarks.sh" + key: "run-micro-benchmarks" + if: build.pull_request.labels includes "ci/run-micro-benchmarks" + plugins: + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + timeout_in_minutes: 60 + retry: *auto-retry + + - label: "upload 
micro-benchmark" + if: build.pull_request.labels includes "ci/run-upload-micro-benchmark" + command: + - "BUILDKITE_BUILD_NUMBER=$BUILDKITE_BUILD_NUMBER ci/scripts/upload-micro-bench-results.sh" + depends_on: "run-micro-benchmarks" + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + BUILDKITE_TOKEN: buildkite_token + GITHUB_TOKEN: github-token + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - BUILDKITE_TOKEN + - GITHUB_TOKEN timeout_in_minutes: 5 diff --git a/ci/workflows/sqlsmith-tests.yml b/ci/workflows/sqlsmith-tests.yml deleted file mode 100644 index 201b3dd8bd20c..0000000000000 --- a/ci/workflows/sqlsmith-tests.yml +++ /dev/null @@ -1,43 +0,0 @@ -auto-retry: &auto-retry - automatic: - # Agent terminated because the AWS EC2 spot instance killed by AWS. - - signal_reason: agent_stop - limit: 3 - -steps: - - label: "check ci image rebuild" - plugins: - - chronotc/monorepo-diff#v2.3.0: - diff: "git diff --name-only origin/main" - watch: - - path: "ci/build-ci-image.sh" - config: - command: "ci/build-ci-image.sh" - label: "ci build images" - retry: *auto-retry - - wait - - - label: "build" - command: "ci/scripts/build.sh -p ci-dev" - key: "build" - plugins: - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - timeout_in_minutes: 15 - retry: *auto-retry - - - label: "fuzz test" - command: "ci/scripts/pr-fuzz-test.sh -p ci-dev" - depends_on: - - "build" - plugins: - - ./ci/plugins/swapfile - - docker-compose#v4.9.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 25 - retry: *auto-retry diff --git a/dashboard/README.md b/dashboard/README.md index 28a9c29f4c6e8..fd4986b4b5bc3 100644 --- a/dashboard/README.md +++ b/dashboard/README.md @@ -28,7 +28,7 @@ TODO: Find a suitable testing framework ## Development -Start the RisingWave database, remove 
drop tables from `tpch_snapshot.slt` +Start the RisingWave database, and create tables by removing drop tables from `tpch_snapshot.slt` ```bash ./risedev d @@ -41,7 +41,7 @@ Install Dependencies. npm i ``` -The website will be served at port 3000. +The website will be served at port 3000. It requests data from the mock server at port 32333. ```bash npm run dev @@ -50,10 +50,11 @@ npm run dev You should also run: ```bash +./mock/fetch.sh # dump current data from RisingWave meta node to be used by mock server node mock-server.js ``` -To start a mock API server when developing. You can use `fetch.sh` to update the mock APIs. +To start a mock API server when developing. ## Test with RisingWave meta node @@ -61,9 +62,11 @@ To replace the built static files in RisingWave with your newest code, run the following scripts in the root directory. ```bash -./risedev export-dashboard-v2 +./risedev export-dashboard ``` +The dashboard will be served by meta node at port 5691. + ## Deployment ### Generate the protos @@ -79,11 +82,4 @@ Check more details at [Static HTML Export](https://nextjs.org/docs/advanced-feat npm run build-static ``` -#### Next.js app - -The built files are generated at `./.next`. - -```bash -npm run build -npm run start -``` +The built files are generated at `./out`. 
diff --git a/dashboard/components/BackPressureTable.tsx b/dashboard/components/BackPressureTable.tsx index 4b5e987a1b924..84ba13c3b849a 100644 --- a/dashboard/components/BackPressureTable.tsx +++ b/dashboard/components/BackPressureTable.tsx @@ -38,9 +38,9 @@ interface BackPressuresMetrics { } export default function BackPressureTable({ - selectedActorIds, + selectedFragmentIds, }: { - selectedActorIds: Set + selectedFragmentIds: Set }) { const [backPressuresMetrics, setBackPressuresMetrics] = useState() @@ -53,7 +53,7 @@ export default function BackPressureTable({ let metrics: BackPressuresMetrics = await getActorBackPressures() metrics.outputBufferBlockingDuration = sortBy( metrics.outputBufferBlockingDuration, - (m) => m.metric.actor_id + (m) => (m.metric.fragment_id, m.metric.downstream_fragment_id) ) setBackPressuresMetrics(metrics) await new Promise((resolve) => setTimeout(resolve, 5000)) // refresh every 5 secs @@ -74,25 +74,27 @@ export default function BackPressureTable({ return () => {} }, [toast]) - const isSelected = (actorId: string) => selectedActorIds.has(actorId) + const isSelected = (fragmentId: string) => selectedFragmentIds.has(fragmentId) const retVal = ( Back Pressures (Last 30 minutes) - - - + + + + {backPressuresMetrics && backPressuresMetrics.outputBufferBlockingDuration - .filter((m) => isSelected(m.metric.actor_id)) + .filter((m) => isSelected(m.metric.fragment_id)) .map((m) => ( - - - + + diff --git a/dashboard/components/FragmentGraph.tsx b/dashboard/components/FragmentGraph.tsx index 9c62680d25fc8..aa85501dc66ca 100644 --- a/dashboard/components/FragmentGraph.tsx +++ b/dashboard/components/FragmentGraph.tsx @@ -117,7 +117,7 @@ export default function FragmentGraph({ extraInfo: string } >() - const includedActorIds = new Set() + const includedFragmentIds = new Set() for (const [fragmentId, fragmentRoot] of deps) { const layoutRoot = treeLayoutFlip(fragmentRoot, { dx: nodeMarginX, @@ -137,7 +137,7 @@ export default function FragmentGraph({ 
height, extraInfo: `Actor ${fragmentRoot.data.actor_ids?.join(", ")}` || "", }) - fragmentRoot.data.actor_ids?.forEach((id) => includedActorIds.add(id)) + includedFragmentIds.add(fragmentId) } const fragmentLayout = layout( fragmentDependencyDag.map(({ width: _1, height: _2, id, ...data }) => { @@ -169,7 +169,7 @@ export default function FragmentGraph({ svgWidth, svgHeight, links, - includedActorIds, + includedFragmentIds, } }, [planNodeDependencies, fragmentDependency]) @@ -189,7 +189,7 @@ export default function FragmentGraph({ links, fragmentLayout: fragmentDependencyDag, layoutResult: planNodeDependencyDag, - includedActorIds, + includedFragmentIds, } = planNodeDependencyDagCallback() useEffect(() => { @@ -434,7 +434,7 @@ export default function FragmentGraph({ - + ) } diff --git a/dashboard/components/Layout.tsx b/dashboard/components/Layout.tsx index 184e17ac1e535..6d6b17cdc7d80 100644 --- a/dashboard/components/Layout.tsx +++ b/dashboard/components/Layout.tsx @@ -140,6 +140,7 @@ function Layout({ children }: { children: React.ReactNode }) { Debug Await Tree Dump + Heap Profiling Settings diff --git a/dashboard/components/metrics.tsx b/dashboard/components/metrics.tsx index efa32175555eb..a933dbc7fd807 100644 --- a/dashboard/components/metrics.tsx +++ b/dashboard/components/metrics.tsx @@ -21,6 +21,11 @@ export interface MetricsSample { } export interface Metrics { + // Tags of this timeseries. Example: {"downstream_fragment_id":"15001","fragment_id":"15002"} metric: { [key: string]: string } + + // Example: [{"timestamp":1695041872.0,"value":0.3797035002929275}, + // {"timestamp":1695041887.0,"value":0.5914327683152408}, + // {"timestamp":1695041902.0,"value":0.8272212493499999}, ... 
] sample: MetricsSample[] } diff --git a/dashboard/mock/fetch.sh b/dashboard/mock/fetch.sh index 463d2a836c633..5cc278e01d0c5 100755 --- a/dashboard/mock/fetch.sh +++ b/dashboard/mock/fetch.sh @@ -2,6 +2,9 @@ set -e +cd "$(dirname "$0")" + +set -v curl http://localhost:5691/api/actors > actors.json curl http://localhost:5691/api/clusters/0 > cluster_0.json curl http://localhost:5691/api/clusters/1 > cluster_1.json diff --git a/dashboard/next.config.js b/dashboard/next.config.js index b9b76901086ee..6a82469449776 100644 --- a/dashboard/next.config.js +++ b/dashboard/next.config.js @@ -15,17 +15,20 @@ * */ -module.exports = () => { - const rewrites = () => { +/** + * @type {import('next').NextConfig} + */ +const nextConfig = { + trailingSlash: true, + + rewrites: () => { return [ { source: "/api/:path*", destination: "http://localhost:32333/:path*", }, ] - } - return { - rewrites, - trailingSlash: true, - } + }, } + +module.exports = nextConfig diff --git a/dashboard/package-lock.json b/dashboard/package-lock.json index 482996c302099..d21a177da60a4 100644 --- a/dashboard/package-lock.json +++ b/dashboard/package-lock.json @@ -13,6 +13,7 @@ "@monaco-editor/react": "^4.4.6", "@types/d3": "^7.4.0", "@types/lodash": "^4.14.184", + "base64url": "^3.0.1", "bootstrap-icons": "^1.9.1", "d3": "^7.6.1", "d3-axis": "^3.0.0", @@ -24,7 +25,7 @@ "fabric": "^5.2.1", "framer-motion": "^6.5.1", "lodash": "^4.17.21", - "next": "^13.4.12", + "next": "^13.5.4", "react": "^18.2.0", "react-dom": "^18.2.0", "react-flow-renderer": "10.3.16", @@ -65,9 +66,9 @@ } }, "node_modules/@adobe/css-tools": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.0.1.tgz", - "integrity": "sha512-+u76oB43nOHrF4DDWRLWDCtci7f3QJoEBigemIdIeTi1ODqjx6Tad9NCVnPRwewWlKkVab5PlK8DCtPTyX7S8g==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", + "integrity": 
"sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", "dev": true }, "node_modules/@ampproject/remapping": { @@ -84,11 +85,12 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" @@ -134,12 +136,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.0.tgz", - "integrity": "sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dependencies": { - "@babel/types": "^7.19.0", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { @@ -189,31 +192,31 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": 
"sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dependencies": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -270,28 +273,28 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + 
"integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz", - "integrity": "sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } @@ -320,12 +323,12 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + 
"@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -333,9 +336,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.0.tgz", - "integrity": "sha512-74bEXKX2h+8rrfQUfsBfuZZHzsEs6Eql4pqy/T4Nn6Y9wNPggQOqD6z6pn5Bl8ZfysKouFZT/UXEH94ummEeQw==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "bin": { "parser": "bin/babel-parser.js" }, @@ -382,31 +385,31 @@ } }, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.0.tgz", - "integrity": "sha512-4pKpFRDh+utd2mbRC8JLnlsMUii3PMHjpL6a0SZ4NMZy7YFP9aXORxEhdMVOc9CpWtDF09IkciQLEhK7Ml7gRA==", - "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.0", - "@babel/types": "^7.19.0", + "version": 
"7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -415,12 +418,12 @@ } }, "node_modules/@babel/types": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.0.tgz", - "integrity": "sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dependencies": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.18.6", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { @@ -1881,12 +1884,12 @@ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", - "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", "dependencies": 
{ - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "node_modules/@loadable/component": { @@ -2033,9 +2036,9 @@ } }, "node_modules/@next/env": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.12.tgz", - "integrity": "sha512-RmHanbV21saP/6OEPBJ7yJMuys68cIf8OBBWd7+uj40LdpmswVAwe1uzeuFyUsd6SfeITWT3XnQfn6wULeKwDQ==" + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.4.tgz", + "integrity": "sha512-LGegJkMvRNw90WWphGJ3RMHMVplYcOfRWf2Be3td3sUa+1AaxmsYyANsA+znrGCBjXJNi4XAQlSoEfUxs/4kIQ==" }, "node_modules/@next/eslint-plugin-next": { "version": "13.4.12", @@ -2067,9 +2070,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.12.tgz", - "integrity": "sha512-deUrbCXTMZ6ZhbOoloqecnUeNpUOupi8SE2tx4jPfNS9uyUR9zK4iXBvH65opVcA/9F5I/p8vDXSYbUlbmBjZg==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.4.tgz", + "integrity": "sha512-Df8SHuXgF1p+aonBMcDPEsaahNo2TCwuie7VXED4FVyECvdXfRT9unapm54NssV9tF3OQFKBFOdlje4T43VO0w==", "cpu": [ "arm64" ], @@ -2082,9 +2085,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.12.tgz", - "integrity": "sha512-WRvH7RxgRHlC1yb5oG0ZLx8F7uci9AivM5/HGGv9ZyG2Als8Ij64GC3d+mQ5sJhWjusyU6T6V1WKTUoTmOB0zQ==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.4.tgz", + "integrity": "sha512-siPuUwO45PnNRMeZnSa8n/Lye5ZX93IJom9wQRB5DEOdFrw0JjOMu1GINB8jAEdwa7Vdyn1oJ2xGNaQpdQQ9Pw==", "cpu": [ "x64" ], @@ -2097,9 +2100,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "13.4.12", - "resolved": 
"https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.12.tgz", - "integrity": "sha512-YEKracAWuxp54tKiAvvq73PUs9lok57cc8meYRibTWe/VdPB2vLgkTVWFcw31YDuRXdEhdX0fWS6Q+ESBhnEig==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.4.tgz", + "integrity": "sha512-l/k/fvRP/zmB2jkFMfefmFkyZbDkYW0mRM/LB+tH5u9pB98WsHXC0WvDHlGCYp3CH/jlkJPL7gN8nkTQVrQ/2w==", "cpu": [ "arm64" ], @@ -2112,9 +2115,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.12.tgz", - "integrity": "sha512-LhJR7/RAjdHJ2Isl2pgc/JaoxNk0KtBgkVpiDJPVExVWA1c6gzY57+3zWuxuyWzTG+fhLZo2Y80pLXgIJv7g3g==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.4.tgz", + "integrity": "sha512-YYGb7SlLkI+XqfQa8VPErljb7k9nUnhhRrVaOdfJNCaQnHBcvbT7cx/UjDQLdleJcfyg1Hkn5YSSIeVfjgmkTg==", "cpu": [ "arm64" ], @@ -2127,9 +2130,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.12.tgz", - "integrity": "sha512-1DWLL/B9nBNiQRng+1aqs3OaZcxC16Nf+mOnpcrZZSdyKHek3WQh6j/fkbukObgNGwmCoVevLUa/p3UFTTqgqg==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.4.tgz", + "integrity": "sha512-uE61vyUSClnCH18YHjA8tE1prr/PBFlBFhxBZis4XBRJoR+txAky5d7gGNUIbQ8sZZ7LVkSVgm/5Fc7mwXmRAg==", "cpu": [ "x64" ], @@ -2142,9 +2145,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.12.tgz", - "integrity": "sha512-kEAJmgYFhp0VL+eRWmUkVxLVunn7oL9Mdue/FS8yzRBVj7Z0AnIrHpTIeIUl1bbdQq1VaoOztnKicAjfkLTRCQ==", + "version": "13.5.4", + "resolved": 
"https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.4.tgz", + "integrity": "sha512-qVEKFYML/GvJSy9CfYqAdUexA6M5AklYcQCW+8JECmkQHGoPxCf04iMh7CPR7wkHyWWK+XLt4Ja7hhsPJtSnhg==", "cpu": [ "x64" ], @@ -2157,9 +2160,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.12.tgz", - "integrity": "sha512-GMLuL/loR6yIIRTnPRY6UGbLL9MBdw2anxkOnANxvLvsml4F0HNIgvnU3Ej4BjbqMTNjD4hcPFdlEow4XHPdZA==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.4.tgz", + "integrity": "sha512-mDSQfqxAlfpeZOLPxLymZkX0hYF3juN57W6vFHTvwKlnHfmh12Pt7hPIRLYIShk8uYRsKPtMTth/EzpwRI+u8w==", "cpu": [ "arm64" ], @@ -2172,9 +2175,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.12.tgz", - "integrity": "sha512-PhgNqN2Vnkm7XaMdRmmX0ZSwZXQAtamBVSa9A/V1dfKQCV1rjIZeiy/dbBnVYGdj63ANfsOR/30XpxP71W0eww==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.4.tgz", + "integrity": "sha512-aoqAT2XIekIWoriwzOmGFAvTtVY5O7JjV21giozBTP5c6uZhpvTWRbmHXbmsjZqY4HnEZQRXWkSAppsIBweKqw==", "cpu": [ "ia32" ], @@ -2187,9 +2190,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.12.tgz", - "integrity": "sha512-Z+56e/Ljt0bUs+T+jPjhFyxYBcdY2RIq9ELFU+qAMQMteHo7ymbV7CKmlcX59RI9C4YzN8PgMgLyAoi916b5HA==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.4.tgz", + "integrity": "sha512-cyRvlAxwlddlqeB9xtPSfNSCRy8BOa4wtMo0IuI9P7Y0XT2qpDrpFKRyZ7kUngZis59mPVla5k8X1oOJ8RxDYg==", "cpu": [ "x64" ], @@ -2332,9 +2335,9 @@ "dev": true }, 
"node_modules/@swc/helpers": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz", - "integrity": "sha512-sJ902EfIzn1Fa+qYmjdQqh8tPsoxyBz+8yBKC2HKUxyezKJFwPGOn7pv4WY6QuQW//ySQi5lJjA/ZT9sNWWNTg==", + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", + "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", "dependencies": { "tslib": "^2.4.0" } @@ -3615,6 +3618,14 @@ "resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz", "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" }, + "node_modules/base64url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", + "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==", + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/big-integer": { "version": "1.6.51", "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", @@ -8130,9 +8141,15 @@ "optional": true }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -8162,39 +8179,37 @@ } }, "node_modules/next": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/next/-/next-13.4.12.tgz", - "integrity": "sha512-eHfnru9x6NRmTMcjQp6Nz0J4XH9OubmzOa7CkWL+AUrUxpibub3vWwttjduu9No16dug1kq04hiUUpo7J3m3Xw==", + "version": 
"13.5.4", + "resolved": "https://registry.npmjs.org/next/-/next-13.5.4.tgz", + "integrity": "sha512-+93un5S779gho8y9ASQhb/bTkQF17FNQOtXLKAj3lsNgltEcF0C5PMLLncDmH+8X1EnJH1kbqAERa29nRXqhjA==", "dependencies": { - "@next/env": "13.4.12", - "@swc/helpers": "0.5.1", + "@next/env": "13.5.4", + "@swc/helpers": "0.5.2", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001406", - "postcss": "8.4.14", + "postcss": "8.4.31", "styled-jsx": "5.1.1", - "watchpack": "2.4.0", - "zod": "3.21.4" + "watchpack": "2.4.0" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": ">=16.8.0" + "node": ">=16.14.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "13.4.12", - "@next/swc-darwin-x64": "13.4.12", - "@next/swc-linux-arm64-gnu": "13.4.12", - "@next/swc-linux-arm64-musl": "13.4.12", - "@next/swc-linux-x64-gnu": "13.4.12", - "@next/swc-linux-x64-musl": "13.4.12", - "@next/swc-win32-arm64-msvc": "13.4.12", - "@next/swc-win32-ia32-msvc": "13.4.12", - "@next/swc-win32-x64-msvc": "13.4.12" + "@next/swc-darwin-arm64": "13.5.4", + "@next/swc-darwin-x64": "13.5.4", + "@next/swc-linux-arm64-gnu": "13.5.4", + "@next/swc-linux-arm64-musl": "13.5.4", + "@next/swc-linux-x64-gnu": "13.5.4", + "@next/swc-linux-x64-musl": "13.5.4", + "@next/swc-win32-arm64-msvc": "13.5.4", + "@next/swc-win32-ia32-msvc": "13.5.4", + "@next/swc-win32-x64-msvc": "13.5.4" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", - "fibers": ">= 3.1.0", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -8203,9 +8218,6 @@ "@opentelemetry/api": { "optional": true }, - "fibers": { - "optional": true - }, "sass": { "optional": true } @@ -8663,9 +8675,9 @@ } }, "node_modules/postcss": { - "version": "8.4.14", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz", - "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": 
"sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -8674,10 +8686,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -10885,14 +10901,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/zod": { - "version": "3.21.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.4.tgz", - "integrity": "sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==", - "funding": { - "url": "https://github.com/sponsors/colinhacks" - } - }, "node_modules/zustand": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz", @@ -10918,9 +10926,9 @@ "dev": true }, "@adobe/css-tools": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.0.1.tgz", - "integrity": "sha512-+u76oB43nOHrF4DDWRLWDCtci7f3QJoEBigemIdIeTi1ODqjx6Tad9NCVnPRwewWlKkVab5PlK8DCtPTyX7S8g==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", + "integrity": "sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", "dev": true }, "@ampproject/remapping": { @@ -10934,11 +10942,12 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "requires": { - 
"@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" } }, "@babel/compat-data": { @@ -10971,12 +10980,13 @@ } }, "@babel/generator": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.0.tgz", - "integrity": "sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "requires": { - "@babel/types": "^7.19.0", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "dependencies": { @@ -11013,25 +11023,25 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==" }, "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": 
"^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-module-imports": { @@ -11073,22 +11083,22 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==" + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==" }, "@babel/helper-validator-identifier": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz", - "integrity": "sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" }, "@babel/helper-validator-option": { "version": "7.18.6", @@ -11108,19 +11118,19 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.0.tgz", - "integrity": "sha512-74bEXKX2h+8rrfQUfsBfuZZHzsEs6Eql4pqy/T4Nn6Y9wNPggQOqD6z6pn5Bl8ZfysKouFZT/UXEH94ummEeQw==" + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==" }, "@babel/plugin-syntax-jsx": { "version": "7.18.6", @@ -11149,39 +11159,39 @@ } }, "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + 
"version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.0.tgz", - "integrity": "sha512-4pKpFRDh+utd2mbRC8JLnlsMUii3PMHjpL6a0SZ4NMZy7YFP9aXORxEhdMVOc9CpWtDF09IkciQLEhK7Ml7gRA==", - "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.0", - "@babel/types": "^7.19.0", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "requires": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" } }, "@babel/types": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.0.tgz", - "integrity": "sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": 
"sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "requires": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.18.6", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, @@ -12280,12 +12290,12 @@ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" }, "@jridgewell/trace-mapping": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", - "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", "requires": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "@loadable/component": { @@ -12409,9 +12419,9 @@ } }, "@next/env": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.12.tgz", - "integrity": "sha512-RmHanbV21saP/6OEPBJ7yJMuys68cIf8OBBWd7+uj40LdpmswVAwe1uzeuFyUsd6SfeITWT3XnQfn6wULeKwDQ==" + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.4.tgz", + "integrity": "sha512-LGegJkMvRNw90WWphGJ3RMHMVplYcOfRWf2Be3td3sUa+1AaxmsYyANsA+znrGCBjXJNi4XAQlSoEfUxs/4kIQ==" }, "@next/eslint-plugin-next": { "version": "13.4.12", @@ -12439,57 +12449,57 @@ } }, "@next/swc-darwin-arm64": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.12.tgz", - "integrity": "sha512-deUrbCXTMZ6ZhbOoloqecnUeNpUOupi8SE2tx4jPfNS9uyUR9zK4iXBvH65opVcA/9F5I/p8vDXSYbUlbmBjZg==", + 
"version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.4.tgz", + "integrity": "sha512-Df8SHuXgF1p+aonBMcDPEsaahNo2TCwuie7VXED4FVyECvdXfRT9unapm54NssV9tF3OQFKBFOdlje4T43VO0w==", "optional": true }, "@next/swc-darwin-x64": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.12.tgz", - "integrity": "sha512-WRvH7RxgRHlC1yb5oG0ZLx8F7uci9AivM5/HGGv9ZyG2Als8Ij64GC3d+mQ5sJhWjusyU6T6V1WKTUoTmOB0zQ==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.4.tgz", + "integrity": "sha512-siPuUwO45PnNRMeZnSa8n/Lye5ZX93IJom9wQRB5DEOdFrw0JjOMu1GINB8jAEdwa7Vdyn1oJ2xGNaQpdQQ9Pw==", "optional": true }, "@next/swc-linux-arm64-gnu": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.12.tgz", - "integrity": "sha512-YEKracAWuxp54tKiAvvq73PUs9lok57cc8meYRibTWe/VdPB2vLgkTVWFcw31YDuRXdEhdX0fWS6Q+ESBhnEig==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.4.tgz", + "integrity": "sha512-l/k/fvRP/zmB2jkFMfefmFkyZbDkYW0mRM/LB+tH5u9pB98WsHXC0WvDHlGCYp3CH/jlkJPL7gN8nkTQVrQ/2w==", "optional": true }, "@next/swc-linux-arm64-musl": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.12.tgz", - "integrity": "sha512-LhJR7/RAjdHJ2Isl2pgc/JaoxNk0KtBgkVpiDJPVExVWA1c6gzY57+3zWuxuyWzTG+fhLZo2Y80pLXgIJv7g3g==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.4.tgz", + "integrity": "sha512-YYGb7SlLkI+XqfQa8VPErljb7k9nUnhhRrVaOdfJNCaQnHBcvbT7cx/UjDQLdleJcfyg1Hkn5YSSIeVfjgmkTg==", "optional": true }, "@next/swc-linux-x64-gnu": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.12.tgz", - "integrity": 
"sha512-1DWLL/B9nBNiQRng+1aqs3OaZcxC16Nf+mOnpcrZZSdyKHek3WQh6j/fkbukObgNGwmCoVevLUa/p3UFTTqgqg==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.4.tgz", + "integrity": "sha512-uE61vyUSClnCH18YHjA8tE1prr/PBFlBFhxBZis4XBRJoR+txAky5d7gGNUIbQ8sZZ7LVkSVgm/5Fc7mwXmRAg==", "optional": true }, "@next/swc-linux-x64-musl": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.12.tgz", - "integrity": "sha512-kEAJmgYFhp0VL+eRWmUkVxLVunn7oL9Mdue/FS8yzRBVj7Z0AnIrHpTIeIUl1bbdQq1VaoOztnKicAjfkLTRCQ==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.4.tgz", + "integrity": "sha512-qVEKFYML/GvJSy9CfYqAdUexA6M5AklYcQCW+8JECmkQHGoPxCf04iMh7CPR7wkHyWWK+XLt4Ja7hhsPJtSnhg==", "optional": true }, "@next/swc-win32-arm64-msvc": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.12.tgz", - "integrity": "sha512-GMLuL/loR6yIIRTnPRY6UGbLL9MBdw2anxkOnANxvLvsml4F0HNIgvnU3Ej4BjbqMTNjD4hcPFdlEow4XHPdZA==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.4.tgz", + "integrity": "sha512-mDSQfqxAlfpeZOLPxLymZkX0hYF3juN57W6vFHTvwKlnHfmh12Pt7hPIRLYIShk8uYRsKPtMTth/EzpwRI+u8w==", "optional": true }, "@next/swc-win32-ia32-msvc": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.12.tgz", - "integrity": "sha512-PhgNqN2Vnkm7XaMdRmmX0ZSwZXQAtamBVSa9A/V1dfKQCV1rjIZeiy/dbBnVYGdj63ANfsOR/30XpxP71W0eww==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.4.tgz", + "integrity": "sha512-aoqAT2XIekIWoriwzOmGFAvTtVY5O7JjV21giozBTP5c6uZhpvTWRbmHXbmsjZqY4HnEZQRXWkSAppsIBweKqw==", "optional": true }, "@next/swc-win32-x64-msvc": { - "version": 
"13.4.12", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.12.tgz", - "integrity": "sha512-Z+56e/Ljt0bUs+T+jPjhFyxYBcdY2RIq9ELFU+qAMQMteHo7ymbV7CKmlcX59RI9C4YzN8PgMgLyAoi916b5HA==", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.4.tgz", + "integrity": "sha512-cyRvlAxwlddlqeB9xtPSfNSCRy8BOa4wtMo0IuI9P7Y0XT2qpDrpFKRyZ7kUngZis59mPVla5k8X1oOJ8RxDYg==", "optional": true }, "@nodelib/fs.scandir": { @@ -12604,9 +12614,9 @@ "dev": true }, "@swc/helpers": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz", - "integrity": "sha512-sJ902EfIzn1Fa+qYmjdQqh8tPsoxyBz+8yBKC2HKUxyezKJFwPGOn7pv4WY6QuQW//ySQi5lJjA/ZT9sNWWNTg==", + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", + "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", "requires": { "tslib": "^2.4.0" } @@ -13631,6 +13641,11 @@ "resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz", "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" }, + "base64url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", + "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==" + }, "big-integer": { "version": "1.6.51", "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", @@ -16976,9 +16991,9 @@ "optional": true }, "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==" + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": 
"sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" }, "natural-compare": { "version": "1.4.0", @@ -16999,27 +17014,26 @@ "dev": true }, "next": { - "version": "13.4.12", - "resolved": "https://registry.npmjs.org/next/-/next-13.4.12.tgz", - "integrity": "sha512-eHfnru9x6NRmTMcjQp6Nz0J4XH9OubmzOa7CkWL+AUrUxpibub3vWwttjduu9No16dug1kq04hiUUpo7J3m3Xw==", - "requires": { - "@next/env": "13.4.12", - "@next/swc-darwin-arm64": "13.4.12", - "@next/swc-darwin-x64": "13.4.12", - "@next/swc-linux-arm64-gnu": "13.4.12", - "@next/swc-linux-arm64-musl": "13.4.12", - "@next/swc-linux-x64-gnu": "13.4.12", - "@next/swc-linux-x64-musl": "13.4.12", - "@next/swc-win32-arm64-msvc": "13.4.12", - "@next/swc-win32-ia32-msvc": "13.4.12", - "@next/swc-win32-x64-msvc": "13.4.12", - "@swc/helpers": "0.5.1", + "version": "13.5.4", + "resolved": "https://registry.npmjs.org/next/-/next-13.5.4.tgz", + "integrity": "sha512-+93un5S779gho8y9ASQhb/bTkQF17FNQOtXLKAj3lsNgltEcF0C5PMLLncDmH+8X1EnJH1kbqAERa29nRXqhjA==", + "requires": { + "@next/env": "13.5.4", + "@next/swc-darwin-arm64": "13.5.4", + "@next/swc-darwin-x64": "13.5.4", + "@next/swc-linux-arm64-gnu": "13.5.4", + "@next/swc-linux-arm64-musl": "13.5.4", + "@next/swc-linux-x64-gnu": "13.5.4", + "@next/swc-linux-x64-musl": "13.5.4", + "@next/swc-win32-arm64-msvc": "13.5.4", + "@next/swc-win32-ia32-msvc": "13.5.4", + "@next/swc-win32-x64-msvc": "13.5.4", + "@swc/helpers": "0.5.2", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001406", - "postcss": "8.4.14", + "postcss": "8.4.31", "styled-jsx": "5.1.1", - "watchpack": "2.4.0", - "zod": "3.21.4" + "watchpack": "2.4.0" } }, "node-fetch": { @@ -17354,11 +17368,11 @@ } }, "postcss": { - "version": "8.4.14", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz", - "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==", + "version": "8.4.31", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } @@ -18928,11 +18942,6 @@ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true }, - "zod": { - "version": "3.21.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.4.tgz", - "integrity": "sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==" - }, "zustand": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz", diff --git a/dashboard/package.json b/dashboard/package.json index 94e8fccdf6138..67d2ff0ef1715 100644 --- a/dashboard/package.json +++ b/dashboard/package.json @@ -19,6 +19,7 @@ "@monaco-editor/react": "^4.4.6", "@types/d3": "^7.4.0", "@types/lodash": "^4.14.184", + "base64url": "^3.0.1", "bootstrap-icons": "^1.9.1", "d3": "^7.6.1", "d3-axis": "^3.0.0", @@ -30,7 +31,7 @@ "fabric": "^5.2.1", "framer-motion": "^6.5.1", "lodash": "^4.17.21", - "next": "^13.4.12", + "next": "^13.5.4", "react": "^18.2.0", "react-dom": "^18.2.0", "react-flow-renderer": "10.3.16", diff --git a/dashboard/pages/await_tree.tsx b/dashboard/pages/await_tree.tsx index 8908e121deabd..3db6b6677274a 100644 --- a/dashboard/pages/await_tree.tsx +++ b/dashboard/pages/await_tree.tsx @@ -36,22 +36,32 @@ import { getClusterInfoComputeNode } from "./api/cluster" import useFetch from "./api/fetch" const SIDEBAR_WIDTH = 200 +const ALL_COMPUTE_NODES = "" export default function AwaitTreeDump() { const { response: computeNodes } = useFetch(getClusterInfoComputeNode) - const [computeNodeId, setComputeNodeId] = useState() - const [dump, setDump] = useState("") + const [computeNodeId, setComputeNodeId] = useState() + const [dump, setDump] = useState("") useEffect(() => 
{ - if (computeNodes && !computeNodeId && computeNodes.length > 0) { - setComputeNodeId(computeNodes[0].id) + if (computeNodes && !computeNodeId) { + setComputeNodeId(ALL_COMPUTE_NODES) } }, [computeNodes, computeNodeId]) const dumpTree = async () => { - const title = `Await-Tree Dump of Compute Node ${computeNodeId}:` - setDump(undefined) + if (computeNodeId === undefined) { + return + } + + let title + if (computeNodeId === ALL_COMPUTE_NODES) { + title = "Await-Tree Dump of All Compute Nodes:" + } else { + title = `Await-Tree Dump of Compute Node ${computeNodeId}:` + } + setDump("Loading...") let result @@ -92,10 +102,13 @@ export default function AwaitTreeDump() { Compute Nodes + setComputeNodeId(parseInt(event.target.value)) + } + > + {computeNodes && + computeNodes.map((n) => ( + + ))} + + + + + + Analyze Heap Profile + + Dumped By + + Dumped Files + + + + + + + {displayInfo === undefined ? ( + + ) : ( + + )} + + + + ) + + return ( + + + Heap Profiling + + {retVal} + + ) +} diff --git a/docker/Dockerfile b/docker/Dockerfile index c665735a07718..ba0c82972a783 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,29 +5,29 @@ ENV LANG en_US.utf8 RUN apt-get update \ && apt-get -y install ca-certificates build-essential libsasl2-dev openjdk-11-jdk -FROM base AS builder +FROM base AS dashboard-builder -RUN apt-get update && apt-get -y install make cmake protobuf-compiler curl bash lld maven unzip +RUN apt-get install -y curl gnupg protobuf-compiler && mkdir -p /etc/apt/keyrings \ + && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_18.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \ + && apt-get update && apt-get install -y nodejs -SHELL ["/bin/bash", "-c"] +COPY ./dashboard/ /risingwave/dashboard +COPY ./proto /risingwave/proto -RUN curl --proto '=https' --tlsv1.2 -sSf 
https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y +RUN cd /risingwave/dashboard && npm i && npm run build-static && rm -rf node_modules -RUN mkdir -p /risingwave +FROM base AS rust-base -WORKDIR /risingwave +RUN apt-get -y install make cmake protobuf-compiler curl bash lld unzip -COPY ./ /risingwave +SHELL ["/bin/bash", "-c"] +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y ENV PATH /root/.cargo/bin/:$PATH +ENV CARGO_INCREMENTAL=0 -ENV IN_CONTAINER=1 - -ARG GIT_SHA -ENV GIT_SHA=$GIT_SHA - -RUN curl -LO https://github.com/risingwavelabs/risingwave/archive/refs/heads/dashboard-artifact.zip -RUN unzip dashboard-artifact.zip && mv risingwave-dashboard-artifact /risingwave/ui && rm dashboard-artifact.zip +COPY rust-toolchain rust-toolchain # We need to add the `rustfmt` dependency, otherwise `risingwave_pb` will not compile RUN rustup self update \ @@ -35,16 +35,81 @@ RUN rustup self update \ && rustup show \ && rustup component add rustfmt + +# TODO: cargo-chef doesn't work well now, because we update Cargo.lock very often. +# We may consider sccache instead. 
+ +# RUN cargo install --git https://github.com/xxchan/cargo-chef cargo-chef --locked --rev 11f9fed + +# FROM rust-base AS rust-planner + +# RUN mkdir -p /risingwave +# WORKDIR /risingwave +# COPY ./ /risingwave + +# RUN cargo chef prepare --recipe-path recipe.json + +# FROM rust-base AS rust-builder + +# RUN mkdir -p /risingwave +# WORKDIR /risingwave + +# COPY --from=rust-planner /risingwave/recipe.json recipe.json + +# # Build dependencies - this can be cached if the dependencies don't change +# RUN cargo chef cook --release --recipe-path recipe.json + +FROM rust-base AS rust-builder + +# Build application +ARG GIT_SHA +ENV GIT_SHA=$GIT_SHA + +COPY ./ /risingwave +WORKDIR /risingwave + RUN cargo fetch && \ cargo build -p risingwave_cmd_all --release --features "rw-static-link" && \ - mkdir -p /risingwave/bin && mv /risingwave/target/release/risingwave /risingwave/bin/ && \ + mkdir -p /risingwave/bin && \ + mv /risingwave/target/release/risingwave /risingwave/bin/ && \ + mv /risingwave/target/release/risingwave.dwp /risingwave/bin/ && \ cp ./target/release/build/tikv-jemalloc-sys-*/out/build/bin/jeprof /risingwave/bin/ && \ chmod +x /risingwave/bin/jeprof && \ mkdir -p /risingwave/lib && cargo clean -RUN cd /risingwave/java && mvn -B package -Dmaven.test.skip=true -Djava.binding.release=true && \ - mkdir -p /risingwave/bin/connector-node && \ - tar -zxvf /risingwave/java/connector-node/assembly/target/risingwave-connector-1.0.0.tar.gz -C /risingwave/bin/connector-node +FROM base AS java-planner + +RUN mkdir -p /risingwave +WORKDIR /risingwave + +COPY java /risingwave/java/ + +# Move java/**/pom.xml to poms/**/pom.xml +RUN find . -name pom.xml -exec bash -c 'mkdir -p poms/$(dirname {}); mv {} poms/{}' \; + +# We use rust-maven-plugin to build java-binding. So it's FROM rust-base +FROM rust-base AS java-builder + +RUN apt-get -y install maven + +RUN mkdir -p /risingwave +WORKDIR /risingwave/java + +# 1. 
copy only poms +COPY --from=java-planner /risingwave/poms /risingwave/java/ + +# 2. start downloading dependencies +RUN mvn dependency:go-offline --fail-never + +# 3. add all source code and start compiling +# TODO: only add java related code so that changing rust code won't recompile java code +# Currently java-binding depends on the workspace Cargo.toml, which depends on the whole rust codebase +# Besides, rust-maven-plugin sets --target-dir, so the dependencies are built twice. How to dedup? +COPY ./ /risingwave + +RUN mvn -B package -Dmaven.test.skip=true -Dno-build-rust && \ + mkdir -p /risingwave/bin/connector-node && \ + tar -zxvf /risingwave/java/connector-node/assembly/target/risingwave-connector-1.0.0.tar.gz -C /risingwave/bin/connector-node FROM base AS risingwave @@ -55,15 +120,19 @@ RUN apt-get -y install gdb \ RUN mkdir -p /risingwave/bin/connector-node && mkdir -p /risingwave/lib -COPY --from=builder /risingwave/bin/risingwave /risingwave/bin/risingwave -COPY --from=builder /risingwave/bin/connector-node /risingwave/bin/connector-node -COPY --from=builder /risingwave/ui /risingwave/ui -COPY --from=builder /risingwave/bin/jeprof /usr/local/bin/jeprof +COPY --from=rust-builder /risingwave/bin/risingwave /risingwave/bin/risingwave +COPY --from=rust-builder /risingwave/bin/risingwave.dwp /risingwave/bin/risingwave.dwp +COPY --from=java-builder /risingwave/bin/connector-node /risingwave/bin/connector-node +COPY --from=dashboard-builder /risingwave/dashboard/out /risingwave/ui +COPY --from=rust-builder /risingwave/bin/jeprof /usr/local/bin/jeprof # Set default playground mode to docker-playground profile ENV PLAYGROUND_PROFILE docker-playground # Set default dashboard UI to local path instead of github proxy ENV RW_DASHBOARD_UI_PATH /risingwave/ui +# Set default connector libs path +ENV CONNECTOR_LIBS_PATH /risingwave/bin/connector-node/libs +ENV IN_CONTAINER=1 ENTRYPOINT [ "/risingwave/bin/risingwave" ] CMD [ "playground" ] diff --git 
a/docker/Dockerfile.hdfs b/docker/Dockerfile.hdfs index b312438ba80ee..e8dd1988bd6fe 100644 --- a/docker/Dockerfile.hdfs +++ b/docker/Dockerfile.hdfs @@ -44,13 +44,15 @@ ENV JAVA_HOME ${JAVA_HOME_PATH} ENV LD_LIBRARY_PATH ${JAVA_HOME_PATH}/lib/server:${LD_LIBRARY_PATH} RUN cargo fetch && \ - cargo build -p risingwave_cmd_all --release --features "rw-static-link" && \ - mkdir -p /risingwave/bin && mv /risingwave/target/release/risingwave /risingwave/bin/ && \ + cargo build -p risingwave_cmd_all --release -p risingwave_object_store --features hdfs-backend --features "rw-static-link" && \ + mkdir -p /risingwave/bin && \ + mv /risingwave/target/release/risingwave /risingwave/bin/ && \ + mv /risingwave/target/release/risingwave.dwp /risingwave/bin/ && \ cp ./target/release/build/tikv-jemalloc-sys-*/out/build/bin/jeprof /risingwave/bin/ && \ chmod +x /risingwave/bin/jeprof && \ mkdir -p /risingwave/lib && cargo clean -RUN cd /risingwave/java && mvn -B package -Dmaven.test.skip=true -Djava.binding.release=true && \ +RUN cd /risingwave/java && mvn -B package -Dmaven.test.skip=true -Dno-build-rust && \ mkdir -p /risingwave/bin/connector-node && \ tar -zxvf /risingwave/java/connector-node/assembly/target/risingwave-connector-1.0.0.tar.gz -C /risingwave/bin/connector-node @@ -61,6 +63,7 @@ FROM image-base as risingwave LABEL org.opencontainers.image.source https://github.com/risingwavelabs/risingwave RUN mkdir -p /risingwave/bin/connector-node && mkdir -p /risingwave/lib COPY --from=builder /risingwave/bin/risingwave /risingwave/bin/risingwave +COPY --from=builder /risingwave/bin/risingwave.dwp /risingwave/bin/risingwave.dwp COPY --from=builder /risingwave/bin/connector-node /risingwave/bin/connector-node COPY --from=builder /risingwave/ui /risingwave/ui COPY --from=builder /risingwave/hdfs_env.sh /risingwave/hdfs_env.sh @@ -88,6 +91,8 @@ ENV CLASSPATH ${HADOOP_CONF_DIR}:${CLASSPATH} ENV PLAYGROUND_PROFILE docker-playground # Set default dashboard UI to local path instead of 
github proxy ENV RW_DASHBOARD_UI_PATH /risingwave/ui +# Set default connector libs path +ENV CONNECTOR_LIBS_PATH /risingwave/bin/connector-node/libs -ENTRYPOINT [ "/risingwave/hdfs_env.sh" ] +ENTRYPOINT [ "/risingwave/bin/risingwave" ] CMD [ "playground" ] diff --git a/docker/README.md b/docker/README.md index 5b06140bac9c1..2da87c9f85907 100644 --- a/docker/README.md +++ b/docker/README.md @@ -58,7 +58,7 @@ It will start a minio, a meta node, a compute node, a frontend, a compactor, a p ### s3 and other s3-compatible storage backend To start a RisingWave cluster with s3 backend, configure the aws credit in [aws.env](https://github.com/risingwavelabs/risingwave/blob/main/docker/aws.env). If you want to use some s3 compatible storage like Tencent Cloud COS, just configure one more [endpoint](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/aws.env#L7). -After configuring the environment and fill in your [bucket name and data directory](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-s3.yml#L196), run +After configuring the environment and fill in your [bucket name](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-s3.yml#L196), run ``` # Start all components @@ -68,7 +68,7 @@ docker-compose -f docker-compose-with-s3.yml up It will run with s3 (compatible) object storage with a meta node, a compute node, a frontend, a compactor, a prometheus and a redpanda instance. 
### Start with other storage products of public cloud vendors -To start a RisingWave cluster with other storage backend, like Google Cloud Storage, Alicloud OSS or Azure Blob Storage, configure the authentication information in [multiple_object_storage.env](https://github.com/risingwavelabs/risingwave/blob/main/docker/multiple_object_storage.env), fill in your [bucket name and data directory](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-gcs.yml#L196). +To start a RisingWave cluster with other storage backend, like Google Cloud Storage, Alicloud OSS or Azure Blob Storage, configure the authentication information in [multiple_object_storage.env](https://github.com/risingwavelabs/risingwave/blob/main/docker/multiple_object_storage.env), fill in your [bucket name](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-gcs.yml#L196). and run ``` @@ -79,7 +79,7 @@ docker-compose -f docker-compose-with-xxx.yml up It will run RisingWave with corresponding (object) storage products. 
### Start with HDFS backend -To start a RisingWave cluster with HDFS, mount your `HADDOP_HOME` in [compactor node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L28), [compute node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L112) [compute node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L218), fill in the [cluster_name/namenode and data_path](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L202), +To start a RisingWave cluster with HDFS, mount your `HADDOP_HOME` in [compactor node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L28), [compute node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L112) [compute node volumes](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L218), fill in the [cluster_name/namenode](https://github.com/risingwavelabs/risingwave/blob/a2684461e379ce73f8d730982147439e2379de16/docker/docker-compose-with-hdfs.yml#L202), and run ``` diff --git a/docker/dashboards/risingwave-dev-dashboard.json b/docker/dashboards/risingwave-dev-dashboard.json index b57021d2cef98..9b9bb59829d23 100644 --- a/docker/dashboards/risingwave-dev-dashboard.json +++ b/docker/dashboards/risingwave-dev-dashboard.json @@ -1 +1 @@ -{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dev 
Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"table_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components 
alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (total) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (avg per core) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the rest are the followers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","query":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta Cluster","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The rate of successful recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery Successful 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of failed reocovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Failed recovery attempts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time spent in a successful recovery 
attempt","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency avg","metric":"","query":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Recovery","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by 
each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(rows).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":15,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(MB/s).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two consecutive barriers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":18,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is 
ready.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Upstream Status","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Source Split Change Events frequency by source_id and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Split Change Events frequency(events/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Kafka Consumer Lag Size by source_id, partition and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"high_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}}","metric":"","query":"high_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}} actor_id={{actor_id}}","metric":"","query":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kafka Consumer Lag Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(sink_name) (group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (actor_id, sink_name))) by (sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_name}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(sink_name) (group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (actor_id, sink_name))) by (sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill snapshot","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Snapshot Read Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been output from the backfill 
upstream","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Upstream Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of barriers that have been ingested but not completely processed. 
This metric reflects the current level of congestion within the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","query":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The duration between the time point when the scheduled barrier needs to be sent and the time point when the barrier 
gets actually sent to all the compute nodes. Developers can thus detect any internal congestion.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":27,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","query":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, 
instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","query":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","query":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":31,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","query":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of actors that have processed the earliest in-flight barriers per second. 
This metric helps users to detect potential congestion or stalls in the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Earliest In-Flight Barrier 
Progress","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":4},"height":null,"hideTimeOverride":false,"id":33,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When enabled, this metric shows the input throughput of each executor.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor 
{{actor_id}}->{{executor_identity}}","metric":"","query":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Backpressure","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory Usage 
(TaskLocalAlloc)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Memory Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Materialized View Memory 
Usage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":38,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Memory 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{upstream_fragment_id}}","metric":"","query":"rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":44,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_s
eries","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss, table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialize Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__
rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Over window cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":50,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","query":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per 
Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. This metric counts the number of join keys in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entries{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_entries{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Entries","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. 
This metric counts the number of rows in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":53,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_rows{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_rows{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. 
This metric counts the size of rows in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":54,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_estimated_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_estimated_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Estimated Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of matched rows on the opposite 
side","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":55,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Matched 
Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Lookup miss count counts the number of aggregation key's cache miss per second. Lookup total count counts the number of rows processed per second. By dividing these two metrics, one can derive the cache miss rate per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":56,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg total lookups - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":57,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":fals
e,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":58,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_keys{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cached keys count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_cached_keys{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_distinct_cached_keys{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg distinct cached keys count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_distinct_cached_keys{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached 
in each top_n executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":59,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n appendonly cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"TopN Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in temporal join executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":60,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal Join cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Cache Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in lookup executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":61,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lookup Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":62,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cached_entry_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cached entry count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cached_entry_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache lookup count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Executor Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":5},"height":null,"hideTimeOverride":false,"id":63,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps"
:[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":64,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":65,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_a
ctor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":66,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":67,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":8},"height":null,"hideTimeOverride":false,"id":68,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":8},"height":null,"hideTimeOverride":false,"id":69,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":70,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":16},"height":null,"hideTimeOverride":false,"id":71,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":16},"height":null,"hideTimeOverride":false,"id":72,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":73,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":24},"height":null,"hideTimeOverride":false,"id":74,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":24},"height":null,"hideTimeOverride":false,"id":75,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":76,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":32},"height":null,"hideTimeOverride":false,"id":77,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":32},"height":null,"hideTimeOverride":false,"id":78,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors 
(Tokio)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":6},"height":null,"hideTimeOverride":false,"id":79,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":80,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Send 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":81,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":7},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":83,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute 
Errors by Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":84,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by 
Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":85,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: actor_id={{actor_id}}, source_id={{source_id}})","metric":"","query":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Reader Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming 
Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":86,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":88,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"All memory usage of batch executors in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mem 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":90,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Heartbeat Worker 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":91,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next Duration","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":92,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":93,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Hummock memory usage includes: 1. Meta Cache 2. Block Cache. This metric shows the real memory usage of each of these caches.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":94,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":95,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$ta
ble|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Miss 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":96,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys 
flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":97,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p50 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, 
type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the latency of Get operations that have been issued to the state store.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":98,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, 
job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the time spent on iterator initialization. Histogram of the time spent on iterator scanning.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":99,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) 
(rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, 
sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":100,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter 
positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter check count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Positive / 
Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":101,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter positive rate - {{table_id}} - {{type}}","metric":"","query":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Positive 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"False-Positive / Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":102,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}}","metric":"","query":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(table_id,type)))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter False-Positive Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":103,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Slow Fetch Meta 
Unhits","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":104,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":105,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":106,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targe
ts":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":107,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Read 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":108,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of a single key-value pair when reading by operation Get.Operation Get gets a single key-value pair with respect to a caller-specified key. 
If the key does not exist in the storage, the size of key is counted into this metric and the size of value is 0.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":109,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of all the key-value paris when reading by operation 
Iter.Operation Iter scans a range of key-value pairs.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":110,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":111,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) 
(rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":112,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Unhits","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Read)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":113,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the real memory usage of uploader.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":114,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading task size - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader Memory Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of time spent on compacting shared buffer to remote storage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":115,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) 
(rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":116,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Write Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":117,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"merge imm tasks - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploader spill tasks - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Tasks Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":118,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Merging tasks memory size - {{table_id}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploading tasks size - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Task Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":119,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDir
ection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":120,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":121,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus"
,"uid":"risedev-prometheus"},"expr":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":122,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / 
sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the statistics of mem_table size on flush. 
By default only max (p100) is shown.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":123,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table Size 
(Max)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":124,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Write)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":11},"height":null,"hideTimeOverride":false,"id":125,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":126,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size(KB) of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":127,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Size(KB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The of bytes that have been written by commit epoch per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":128,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{table_id}}","metric":"","query":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit Flush Bytes by Table","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":129,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":130,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have been 
skipped.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":131,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","query":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg l0 select_level_count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":132,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task L0 Select Level Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg file count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":133,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task File Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The distribution of the compact task size triggered, including p90 and max. 
and categorize it according to different cg, levels and task types.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":134,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task Size Distribution","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that are running.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":135,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","query":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compact-task: The total time have been spent on 
compaction.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":136,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute_apply_version_duration_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","query":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","query":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"KBs read from next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":137,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Write refers to the process of compacting SSTables at one level to another 
level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":138,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes(GiB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Write amplification is the amount of bytes written to the remote 
storage by compaction for each one byte of flushed SSTable data. Write amplification is by definition higher than 1.0 because we write each piece of data to L0, and then write it again to an SSTable, and then compaction may read this piece of data and write it to a new SSTable, that's another write.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":139,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write amplification","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables that is being 
compacted at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":140,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"num of 
compact_task","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":141,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task}}","metric":"","query":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting Task 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":142,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current 
level","metric":"","query":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read/Write by Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":143,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read/Write by level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_bloom_filter, for observing bloom_filter 
size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":144,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":145,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg count gotten from sstable_distinct_epoch_count, for observing 
sstable_distinct_epoch_count","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":146,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_epoch_count - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Stat","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total time of operations which read from remote storage when enable 
prefetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":147,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":148,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","query":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter 
keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"bytes of Lsm tree needed to reach balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":149,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","query":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compression ratio of each level of the lsm 
tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":150,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","query":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":12},"height":null,"hideTimeOverride":false,"id":151,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":152,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":153,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":154,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"
repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":155,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":156,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store
_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":157,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Retry Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"There are two types of operations: 1. GET, SELECT, and DELETE, they cost 0.0004 USD per 1000 requests. 2. PUT, COPY, POST, LIST, they cost 0.005 USD per 1000 requests.Reading from S3 across different regions impose extra cost. This metric assumes 0.01 USD per 1GB data transfer. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":158,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","query":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 
1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric uses the total size of data in S3 at this second to derive the cost of storing data for a whole month. The price is 0.023 USD per GB. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":159,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object 
Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":13},"height":null,"hideTimeOverride":false,"id":160,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":161,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":162,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":163,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"b
ottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":164,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} size 
@ {{instance}}","metric":"","query":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":165,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache hit ratio @ 
{{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Hit Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":166,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(refill_queue_total) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"refill queue length @ {{instance}}","metric":"","query":"sum(refill_queue_total) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Queue Length","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":167,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(data_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(data_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(data_refill_filtered_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data file cache refill - filtered @ 
{{instance}}","metric":"","query":"sum(rate(data_refill_filtered_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(meta_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(meta_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":168,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered 
Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":14},"height":null,"hideTimeOverride":false,"id":169,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":170,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, 
lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":171,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - 
{{method}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":172,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size{job=~\"$jo
b\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","query":"storage_version_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":173,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","query":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version 
id","metric":"","query":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","query":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","query":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":174,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasou
rce":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed epoch","metric":"","query":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","query":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","query":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":175,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"m
inSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":176,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":177,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\nObjects are classified into 3 groups:\n- not referenced by versions: these object are being deleted from object store.\n- referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. 
long-running pinning). Thus those objects cannot be deleted at the moment.\n- referenced by current version: these objects are in the latest version.\n\nAdditionally, a metric on all objects (including dangling ones) is updated with low-frequency. The metric is updated right before full GC. So subsequent full GC may reduce the actual value significantly, without updating the metric.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":178,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects (including dangling ones)","metric":"","query":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Refer to `Object Total Number` panel for classification of 
objects.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":179,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects, including dangling ones","metric":"","query":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of hummock version delta log","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":180,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"delta log total 
number","metric":"","query":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Delta Log Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"hummock version checkpoint latency","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":181,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_pmax","metric":"","query":"histogram_quantile(1.0, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_avg","metric":"","query":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Checkpoint Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When certain per compaction group threshold is exceeded (e.g. number of level 0 sub-level in LSMtree), write op to that compaction group is stopped temporarily. 
Check log for detail reason of write stop.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":182,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compaction_group_{{compaction_group_id}}","metric":"","query":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Stop Compaction Groups","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of attempts to trigger full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":183,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_trigger_count","metric":"","query":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Trigger Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"the object id watermark used in last full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":184,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_last_object_id_watermark","metric":"","query":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Last 
Watermark","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":185,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Event Loop Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The times of move_state_table occurs","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":186,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by 
(group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"move table cg{{group}}","metric":"","query":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Move State Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of state_tables in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":187,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"state table cg{{group}}","metric":"","query":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"State Table 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of branched_sst in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":188,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"branched sst cg{{group}}","metric":"","query":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Branched SST Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":15},"height":null,"hideTimeOverride":false,"id":189,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total backup job count since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":190,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","query":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Latency of backup jobs since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":191,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","query":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - 
{{state}}","metric":"","query":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","query":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":192,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"
s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":193,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":194,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":195,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":196,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":197,"interval":"1s","li
nks":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":198,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risede
v-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":199,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":200,"interval
":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":201,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":202,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":19},"height":null,"hideTimeOverride":false,"id":203,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":204,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":205,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":206,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":207,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":20},"height":null,"hideTimeOverride":false,"id":208,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":209,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_inte
rval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":210,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","query":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":211,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latencyp90 - {{instance}} ","metric":"","query":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":212,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":213,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":214,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":215,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","query":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":21},"height":null,"hideTimeOverride":false,"id":216,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":217,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{jo
b=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":218,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":219,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":220,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Running Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":221,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Rejected queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":222,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Completed Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":223,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Distributed Query Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":224,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":22},"height":null,"hideTimeOverride":false,"id":225,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":226,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":nul
l,"title":"LRU manager loop count per sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":227,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. 
The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":228,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now 
(ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":229,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":230,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":231,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_evicted_watermark_time_diff_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"lru_evicted_watermark_time_diff_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between current watermark and evicted watermark time (ms) for actors","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory 
manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":23},"height":null,"hideTimeOverride":false,"id":232,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":233,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ {{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":234,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":235,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":236,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time since this client instance was created (milli seconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_age{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":
"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_age{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Client Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current number of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current total size of messages in producer 
queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Size in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages transmitted (produced) to Kafka 
brokers","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Produced Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka 
brokers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Received Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster Level 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":237,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages awaiting transmission to broker","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count Pending to Transmit (per 
broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages in-flight to broker awaiting response","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Inflight Message Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of transmission 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Transmitting (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of receive 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Receiving (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of requests timed 
out","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Timeout Request Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker latency / round-trip time in milli 
seconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, 
broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"RTT (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker throttling time in 
milliseconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throttle Time (per broker)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Broker Level 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":238,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Age of metadata from broker for this topic (milliseconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}","metric":"","query":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Metadata_age 
Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch sizes in bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch message counts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch 
Messages","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Topic Level Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":239,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages ready to be produced in transmit queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition 
}}","metric":"","query":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message to be Transmitted","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of pre-fetched messages in fetch queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message in pre fetch 
queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Next offset to fetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Next offset to fetch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Last committed 
offset","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Committed Offset","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Partition Level Metrics","transformations":[],"transparent":false,"type":"row"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Kafka Native 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":25},"height":null,"hideTimeOverride":false,"id":240,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":241,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / 
(1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Network throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":242,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ 
{{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"S3 throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":243,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\
",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total write @ 
{{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"gRPC throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":244,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, 
error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} grpc {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"IO error 
rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":245,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, 
connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Existing connection count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":246,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, 
connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":247,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection err rate","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network connection","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(table_info, 
table_id)","description":"Reporting table id of the metric","hide":0,"includeAll":true,"label":"Table","multi":true,"name":"table","options":[],"query":{"query":"label_values(table_info, table_id)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dev_dashboard","uid":"Ecy3uV1nz","version":0} +{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dev Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment 
id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"table_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components 
alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (total) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (avg per core) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the rest are the followers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","query":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta Cluster","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The rate of successful recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery Successful 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of failed reocovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Failed recovery attempts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time spent in a successful recovery 
attempt","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency avg","metric":"","query":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Recovery","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by 
each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(rows).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":15,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(MB/s).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two consecutive barriers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":18,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is 
ready.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Upstream Status","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Source Split Change Events frequency by source_id and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Split Change Events frequency(events/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Kafka Consumer Lag Size by source_id, partition and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"high_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}}","metric":"","query":"high_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}} actor_id={{actor_id}}","metric":"","query":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kafka Consumer Lag Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id, actor_id) * on(actor_id) group_left(sink_name) sink_info{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}} - actor {{actor_id}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id, actor_id) * on(actor_id) group_left(sink_name) sink_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s) per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (actor_id, table_id) * on(actor_id, table_id) group_left(table_name) table_info{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}} - actor {{actor_id}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (actor_id, table_id) * on(actor_id, table_id) group_left(table_name) table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s) per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill 
snapshot","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Snapshot Read Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been output from the backfill 
upstream","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":27,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Upstream Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of barriers that have been ingested but not completely processed. 
This metric reflects the current level of congestion within the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","query":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The duration between the time point when the scheduled barrier needs to be sent and the time point when the barrier 
gets actually sent to all the compute nodes. Developers can thus detect any internal congestion.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","query":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":31,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, 
instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","query":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","query":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","query":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of actors that have processed the earliest in-flight barriers per second. 
This metric helps users to detect potential congestion or stuck in the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Earliest In-Flight Barrier 
Progress","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":4},"height":null,"hideTimeOverride":false,"id":35,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When enabled, this metric shows the input throughput of each executor.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}} actor 
{{actor_id}}","metric":"","query":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment 
{{fragment_id}}->{{downstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output Blocking Time Ratio (Backpressure)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":38,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory Usage 
(TaskLocalAlloc)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Memory Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Materialized View Memory
Usage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Memory 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, upstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment {{fragment_id}}<-{{upstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, upstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":44,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_s
eries","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss, table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialize Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instanc
e=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache lookup count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / 
(sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Over window cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":53,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":54,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","query":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per 
Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. This metric counts the number of join keys in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":55,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of matched rows on the opposite 
side","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":56,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Matched 
Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Lookup miss count counts the number of aggregation key's cache miss per second. Lookup total count counts the number of rows processed per second. By dividing these two metrics, one can derive the cache miss rate per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":57,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"distinct agg total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n appendonly total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup executor cache 
miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup executor total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":58,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","
format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":59,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cached keys count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_distinct_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg distinct cached keys count |table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_distinct_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of dirty (unflushed) groups in each hash aggregation executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":60,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_dirty_groups_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg dirty groups count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_dirty_groups_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Dirty Groups Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The total heap size of dirty (unflushed) groups in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":61,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_dirty_groups_heap_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg dirty groups heap size | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_dirty_groups_heap_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Dirty Groups Heap Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each top_n executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":62,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n appendonly cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"TopN Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in temporal join executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":63,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal Join cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Cache Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in lookup executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":64,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lookup Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in over window executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":112},"height":null,"hideTimeOverride":false,"id":65,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_over_window_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"over window cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_over_window_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Cached Keys","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":5},"height":null,"hideTimeOverride":false,"id":66,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":67,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":68,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":69,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":70,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":8},"height":null,"hideTimeOverride":false,"id":71,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":8},"height":null,"hideTimeOverride":false,"id":72,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":73,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":16},"height":null,"hideTimeOverride":false,"id":74,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":16},"height":null,"hideTimeOverride":false,"id":75,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":76,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":24},"height":null,"hideTimeOverride":false,"id":77,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":24},"height":null,"hideTimeOverride":false,"id":78,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":79,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":32},"height":null,"hideTimeOverride":false,"id":80,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":32},"height":null,"hideTimeOverride":false,"id":81,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors 
(Tokio)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":6},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":83,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Send 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":84,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":7},"height":null,"hideTimeOverride":false,"id":85,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":86,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute 
Errors by Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by 
Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":88,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: actor_id={{actor_id}}, source_id={{source_id}})","metric":"","query":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Reader Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming 
Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":90,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":91,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"All memory usage of batch executors in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":92,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mem 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":93,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Heartbeat Worker Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the memory usage of 
mem_table.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":94,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table size total - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table size - table id {{table_id}} instance id {{instance_id}} {{job}} @ {{instance}}","metric":"","query":"state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the item counts in mem_table.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":95,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table counts total - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table count - table id {{table_id}} instance id {{instance_id}} {{job}} @ 
{{instance}}","metric":"","query":"state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":96,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next 
Duration","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":97,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":98,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Hummock has three parts of memory usage: 1. Meta Cache 2. 
Block Cache. This metric shows the real memory usage of each of these caches.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":99,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":100,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Miss 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":101,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys 
flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":102,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p50 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, 
type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the latency of Get operations that have been issued to the state store.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":103,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, 
job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the time spent on iterator initialization. Histogram of the time spent on iterator scanning.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":104,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) 
(rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, 
sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":105,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter 
positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter check count- {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Positive / 
Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":106,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter positive rate - {{table_id}} - {{type}}","metric":"","query":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Positive 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"False-Positive / Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":107,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}}","metric":"","query":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(table_id,type)))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter False-Positive Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":108,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Slow Fetch Meta 
Unhits","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":109,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":110,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":111,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targe
ts":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":112,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Read 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":113,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of a single key-value pair when reading by operation Get.Operation Get gets a single key-value pair with respect to a caller-specified key. 
If the key does not exist in the storage, the size of key is counted into this metric and the size of value is 0.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":114,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of all the key-value pairs when reading by operation 
Iter.Operation Iter scans a range of key-value pairs.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":115,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":116,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) 
(rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":117,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Unhits","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Read)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":118,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the real memory usage of uploader.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":119,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading task size - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader Memory Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of time spent on compacting shared buffer to remote storage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":120,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) 
(rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":121,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Write Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":122,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"merge imm tasks - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploader spill tasks - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Tasks Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":123,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Merging tasks memory size - {{table_id}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploading tasks size - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Task Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":124,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDir
ection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":125,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":126,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus"
,"uid":"risedev-prometheus"},"expr":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":127,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / 
sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the statistics of mem_table size on flush. 
By default only max (p100) is shown.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":128,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_write_batch_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, table_id, job, instance) (rate(state_store_write_batch_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ 
{{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_write_batch_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, table_id, job, instance) (rate(state_store_write_batch_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Batch Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":129,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock (Write)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":11},"height":null,"hideTimeOverride":false,"id":130,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":131,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, 
level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size(KB) of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":132,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Size(KB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by commit epoch per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":133,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{table_id}}","metric":"","query":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit Flush Bytes by Table","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":134,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":135,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have been 
skipped.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":136,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","query":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg l0 select_level_count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":137,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task L0 Select Level Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg file count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":138,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task File Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The distribution of the compact task size triggered, including p90 and max. 
and categorize it according to different cg, levels and task types.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":139,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task Size Distribution","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that are running.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":140,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","query":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compact-task: The total time have been spent on 
compaction.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":141,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute_apply_version_duration_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","query":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","query":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"KBs read from next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":142,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_fast_compact_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fast compact - {{job}}","metric":"","query":"sum(rate(compactor_fast_compact_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Write refers to the process of compacting SSTables at one level to another 
level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":143,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes(GiB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Write amplification is the amount of bytes written to the remote 
storage by compaction for each one byte of flushed SSTable data. Write amplification is by definition higher than 1.0 because we write each piece of data to L0, and then write it again to an SSTable, and then compaction may read this piece of data and write it to a new SSTable, that's another write.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":144,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write amplification","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables that is being 
compacted at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":145,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"num of 
compact_task","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":146,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task}}","metric":"","query":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting Task 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":147,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current 
level","metric":"","query":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read/Write by Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":148,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read/Write by level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_bloom_filter, for observing bloom_filter 
size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":149,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":150,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg count gotten from sstable_distinct_epoch_count, for observing 
sstable_distinct_epoch_count","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":151,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_epoch_count - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Stat","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total time of operations which read from remote storage when enable 
prefetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":152,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":153,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","query":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter 
keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"bytes of Lsm tree needed to reach balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":154,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","query":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compression ratio of each level of the lsm 
tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":155,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","query":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":12},"height":null,"hideTimeOverride":false,"id":156,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":157,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":158,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":159,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"
repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":160,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":161,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store
_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":162,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Retry Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"There are two types of operations: 1. GET, SELECT, and DELETE, they cost 0.0004 USD per 1000 requests. 2. PUT, COPY, POST, LIST, they cost 0.005 USD per 1000 requests.Reading from S3 across different regions impose extra cost. This metric assumes 0.01 USD per 1GB data transfer. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":163,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","query":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 
1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric uses the total size of data in S3 at this second to derive the cost of storing data for a whole month. The price is 0.023 USD per GB. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":164,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object 
Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":13},"height":null,"hideTimeOverride":false,"id":165,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":166,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":167,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":168,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"b
ottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":169,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} size 
@ {{instance}}","metric":"","query":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":170,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache hit ratio @ 
{{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Hit Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":171,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(refill_queue_total) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"refill queue length @ {{instance}}","metric":"","query":"sum(refill_queue_total) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Queue Length","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":172,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(refill_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} file cache refill - {{op}} @ 
{{instance}}","metric":"","query":"sum(rate(refill_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":173,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered 
Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":14},"height":null,"hideTimeOverride":false,"id":174,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":175,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, 
lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":176,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - 
{{method}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":177,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size{job=~\"$jo
b\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","query":"storage_version_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":178,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","query":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version 
id","metric":"","query":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","query":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","query":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":179,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasou
rce":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed epoch","metric":"","query":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","query":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","query":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":180,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"m
inSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":181,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":182,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\nObjects are classified into 3 groups:\n- not referenced by versions: these object are being deleted from object store.\n- referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. 
long-running pinning). Thus those objects cannot be deleted at the moment.\n- referenced by current version: these objects are in the latest version.\n\nAdditionally, a metric on all objects (including dangling ones) is updated with low-frequency. The metric is updated right before full GC. So subsequent full GC may reduce the actual value significantly, without updating the metric.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":183,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects (including dangling ones)","metric":"","query":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Refer to `Object Total Number` panel for classification of 
objects.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":184,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects, including dangling ones","metric":"","query":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of hummock version delta log","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":185,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"delta log total 
number","metric":"","query":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Delta Log Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"hummock version checkpoint latency","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":186,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_pmax","metric":"","query":"histogram_quantile(1.0, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_avg","metric":"","query":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Checkpoint Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When certain per compaction group threshold is exceeded (e.g. number of level 0 sub-level in LSMtree), write op to that compaction group is stopped temporarily. 
Check log for detail reason of write stop.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":187,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compaction_group_{{compaction_group_id}}","metric":"","query":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Stop Compaction Groups","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of attempts to trigger full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":188,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_trigger_count","metric":"","query":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Trigger Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"the object id watermark used in last full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":189,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_last_object_id_watermark","metric":"","query":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Last 
Watermark","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":190,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Event Loop Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The times of move_state_table occurs","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":191,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by 
(group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"move table cg{{group}}","metric":"","query":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Move State Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of state_tables in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":192,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"state table cg{{group}}","metric":"","query":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"State Table 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of branched_sst in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":193,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"branched sst cg{{group}}","metric":"","query":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Branched SST Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":15},"height":null,"hideTimeOverride":false,"id":194,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total backup job count since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":195,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","query":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Latency of backup jobs since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":196,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","query":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - 
{{state}}","metric":"","query":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","query":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":197,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"
s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":198,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":199,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":200,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":201,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":202,"interval":"1s","li
nks":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":203,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risede
v-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":204,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":205,"interval
":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":206,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":207,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":19},"height":null,"hideTimeOverride":false,"id":208,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":209,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":210,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":211,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":212,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":20},"height":null,"hideTimeOverride":false,"id":213,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":214,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_inte
rval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":215,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","query":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":216,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latencyp90 - {{instance}} ","metric":"","query":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":217,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":218,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":219,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":220,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","query":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":21},"height":null,"hideTimeOverride":false,"id":221,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":222,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{jo
b=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":223,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":224,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":225,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Running Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":226,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Rejected queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":227,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Completed Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":228,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Distributed Query Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":229,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":22},"height":null,"hideTimeOverride":false,"id":230,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":231,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":nul
l,"title":"LRU manager loop count per sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":232,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. 
The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":233,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now 
(ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":234,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":235,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":236,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jvm_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jvm_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jvm","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":237,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jvm_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jvm_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jvm","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":238,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"} - on() group_right() lru_evicted_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"} - on() group_right() lru_evicted_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between current watermark and evicted watermark time (ms) for actors","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory 
manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":23},"height":null,"hideTimeOverride":false,"id":239,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":240,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ {{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":241,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":242,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":243,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(0.5, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, 
sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(0.99, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(1.0, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, connector, sink_id)(rate(sink_commit_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, connector, sink_id) (rate(sink_commit_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{connector}} @ {{sink_id}}","metric":"","query":"sum by(le, connector, sink_id)(rate(sink_commit_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, connector, sink_id) (rate(sink_commit_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{connector}} @ {{sink_id}}","metric":"","query":"sum by(le, connector, sink_id)(rate(sink_commit_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, connector, sink_id) (rate(sink_commit_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit
Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":244,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"latest write epoch @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"latest read epoch @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Read/Write 
Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":245,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(max(log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Consume lag @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"(max(log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store 
Lag","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":246,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"clamp_min((max(log_store_first_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000, 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Consume persistent log lag @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"clamp_min((max(log_store_first_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000, 0)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Consume Persistent Log 
Lag","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":247,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}}","metric":"","query":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Consume 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":248,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}","metric":"","query":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Log Store Consume 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":249,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}}","metric":"","query":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Write 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":250,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}","metric":"","query":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Log Store Write 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":251,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_read_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_read_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Read Storage Row 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":252,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_read_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_read_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Read Storage 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":253,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_write_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_write_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Write Storage Row 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":254,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_write_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_write_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Write Storage Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Sink 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":25},"height":null,"hideTimeOverride":false,"id":255,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current number of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":256,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count in Producer 
Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current total size of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":257,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Size in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages transmitted (produced) to Kafka 
brokers","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":258,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Produced Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka 
brokers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":259,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Received Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages awaiting transmission to 
broker","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":260,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count Pending to Transmit (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages in-flight to broker awaiting 
response","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":261,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Inflight Message Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of transmission 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":262,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Transmitting (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of receive 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":263,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Receiving (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of requests timed 
out","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":264,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Timeout Request Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker latency / round-trip time in milli 
seconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":265,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, 
broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"RTT (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker throttling time in 
milliseconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":266,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throttle Time (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Age of metadata from broker for this topic 
(milliseconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":267,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}","metric":"","query":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Metadata_age Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch sizes in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":268,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch message 
counts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch Messages","transformations":[],"transparent":false,"type":"timeseries"}],"timeFrom":null,"timeShift":null,"title":"Topic Batch Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages ready to be produced in transmit 
queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":269,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message to be Transmitted","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of pre-fetched messages in fetch 
queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":270,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message in pre fetch queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Next offset to 
fetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":271,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Next offset to fetch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Last committed 
offset","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":272,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Committed Offset","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Kafka Native 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":273,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":274,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / 
(1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Network throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":275,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ 
{{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"S3 throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":276,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\
",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total write @ 
{{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"gRPC throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":277,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, 
error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} grpc {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"IO error 
rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":278,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, 
connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Existing connection count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":279,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, 
connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":280,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection err rate","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network connection","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(table_info, 
table_id)","description":"Reporting table id of the metric","hide":0,"includeAll":true,"label":"Table","multi":true,"name":"table","options":[],"query":{"query":"label_values(table_info, table_id)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dev_dashboard","uid":"Ecy3uV1nz","version":0} diff --git a/docker/dashboards/risingwave-user-dashboard.json b/docker/dashboards/risingwave-user-dashboard.json index 0044f24f3313f..63e3ed095ac69 100644 --- a/docker/dashboards/risingwave-user-dashboard.json +++ b/docker/dashboards/risingwave-user-dashboard.json @@ -1 +1 @@ -{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment 
id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, 
table_type)","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Overview","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{source_name}}","metric":"","query":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":10},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id {{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. 
This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Alerts in the system group by type:\n - Too Many Barriers: there are too many uncommitted barriers generated. This means the streaming graph is stuck or under heavy load. Check 'Barrier Latency' panel.\n - Recovery Triggered: cluster recovery is triggered. Check 'Errors by Type' / 'Node Count' panels.\n - Lagging Version: the checkpointed or pinned version id is lagging behind the current version id. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Epoch: the pinned or safe epoch is lagging behind the current max committed epoch. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Compaction: there are too many files in L0. This can be caused by compactor failure or lag of compactor resource. Check 'Compaction' section in dev dashboard.\n - Lagging Vacuum: there are too many stale files waiting to be cleaned. This can be caused by compactor failure or lag of compactor resource. 
Check 'Compaction' section in dev dashboard.\n - Abnormal Meta Cache Memory: the meta cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Block Cache Memory: the block cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Uploading Memory Usage: uploading memory is more than 70 percent of the expected, and is about to spill.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Too Many Barriers","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recovery 
Triggered","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Version","metric":"","query":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Epoch","metric":"","query":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - 
storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Compaction","metric":"","query":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Vacuum","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Meta Cache Memory","metric":"","query":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Block Cache 
Memory","metric":"","query":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Uploading Memory Usage","metric":"","query":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Alerts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Errors in the system group by type","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, 
executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute error {{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"parse error {{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source error: source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote storage error {{type}}: {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Errors","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Local mode","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distributed 
mode","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Query QPS","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions in frontend 
nodes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of CPU cores per RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Core 
Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"CPU","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":51},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage 
(Total)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"streaming actor - {{actor_id}}","metric":"","query":"rate(actor_memory_usage[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage meta cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage block cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage write buffer - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage (Detailed)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Executor cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - total lookups - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss - table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - total cache count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, 
actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - 
table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / 
(sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - 
(sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"memory cache - {{table_id}} @ {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage bloom filter statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter total - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Bloom Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage file cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage File Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":52},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Send/Recv throughput per node for streaming 
exchange","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Send @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recv @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Streaming Remote Exchange 
(Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput per node","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Exchange Recv 
(Rows/s)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":53},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n 
","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The storage size of each materialized view","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n 
Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Compaction refers to the process of compacting SSTables at one level to another level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$no
de\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Compaction - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Size statistics for checkpoint","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}}","metric":"","query":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":54},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized executor actor per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) 
(group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill operator used by MV on MV","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Read Snapshot - table_id={{table_id}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Upstream - table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. 
Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor 
Backpressure","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":55},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution 
mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ 
{{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Fcy3uV1nz","version":0} +{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id 
Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to its internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Overview","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{source_name}}","metric":"","query":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":10},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id {{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. 
the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Alerts in the system group by type:\n - Too Many Barriers: there are too many uncommitted barriers generated. This means the streaming graph is stuck or under heavy load. Check 'Barrier Latency' panel.\n - Recovery Triggered: cluster recovery is triggered. Check 'Errors by Type' / 'Node Count' panels.\n - Lagging Version: the checkpointed or pinned version id is lagging behind the current version id. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Epoch: the pinned or safe epoch is lagging behind the current max committed epoch. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Compaction: there are too many files in L0. This can be caused by compactor failure or lag of compactor resource. Check 'Compaction' section in dev dashboard.\n - Lagging Vacuum: there are too many stale files waiting to be cleaned. This can be caused by compactor failure or lag of compactor resource. 
Check 'Compaction' section in dev dashboard.\n - Abnormal Meta Cache Memory: the meta cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Block Cache Memory: the block cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Uploading Memory Usage: uploading memory is more than 70 percent of the expected, and is about to spill.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Too Many Barriers","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recovery 
Triggered","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Version","metric":"","query":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Epoch","metric":"","query":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - 
storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Compaction","metric":"","query":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Vacuum","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Meta Cache Memory","metric":"","query":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Block Cache 
Memory","metric":"","query":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Uploading Memory Usage","metric":"","query":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Alerts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Errors in the system group by type","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, 
executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute error {{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"parse error {{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source error: source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote storage error {{type}}: {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Errors","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Local mode","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distributed 
mode","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Query QPS","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions in frontend 
nodes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of CPU cores per RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Core 
Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"CPU","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":51},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage 
(Total)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"streaming actor - {{actor_id}}","metric":"","query":"rate(actor_memory_usage[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage meta cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage block cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage write buffer - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage (Detailed)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Executor cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - total lookups - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg - total lookups - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly - total lookups - 
table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal join - cache miss - table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal join - total lookups - table_id 
{{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - cache hit count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - total cache count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor 
Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"memory cache - {{table_id}} @ {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage bloom filter statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter total - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Bloom Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage file cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, 
instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage File Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":52},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Send/Recv throughput per node for streaming 
exchange","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Send @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recv @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Streaming Remote Exchange 
(Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput per node","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Exchange Recv 
(Rows/s)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":53},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n 
","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The storage size of each materialized view","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n 
Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Compaction refers to the process of compacting SSTables at one level to another level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$no
de\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Compaction - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Size statistics for checkpoint","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}}","metric":"","query":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":54},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized executor actor per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) 
(group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill operator used by MV on MV","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Read Snapshot - table_id={{table_id}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Upstream - table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. 
Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment {{fragment_id}}->{{downstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output Blocking Time Ratio 
(Backpressure)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":55},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution 
mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ 
{{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Fcy3uV1nz","version":0} diff --git a/docker/docker-compose-with-azblob.yml b/docker/docker-compose-with-azblob.yml index ac67a6dc730f6..91a88c6bbb9e7 100644 --- a/docker/docker-compose-with-azblob.yml +++ b/docker/docker-compose-with-azblob.yml @@ -50,7 +50,7 @@ services: - "--connector-rpc-endpoint" - "connector-node:50051" - "--state-store" - - "hummock+azblob://@" + - "hummock+azblob://" - "--data-directory" - "hummock_001" - "--config-path" diff --git a/docker/docker-compose-with-gcs.yml b/docker/docker-compose-with-gcs.yml index d6d3cd480dcf0..a3c2b145ea3fa 100644 --- a/docker/docker-compose-with-gcs.yml +++ b/docker/docker-compose-with-gcs.yml @@ -50,7 +50,7 @@ services: - 
"--connector-rpc-endpoint" - "connector-node:50051" - "--state-store" - - "hummock+gcs://@" + - "hummock+gcs://" - "--data-directory" - "hummock_001" - "--config-path" diff --git a/docker/docker-compose-with-hdfs.yml b/docker/docker-compose-with-hdfs.yml index c76d3b3a7ff1d..4467b03a7c039 100644 --- a/docker/docker-compose-with-hdfs.yml +++ b/docker/docker-compose-with-hdfs.yml @@ -11,8 +11,6 @@ services: - "compactor-0:6660" - "--prometheus-listener-addr" - "0.0.0.0:1260" - - "--metrics-level" - - "info" - "--meta-address" - "http://meta-node-0:5690" - "--config-path" @@ -46,8 +44,6 @@ services: - "compute-node-0:5688" - "--prometheus-listener-addr" - "0.0.0.0:1222" - - "--metrics-level" - - "info" - "--meta-address" - "http://meta-node-0:5690" - "--connector-rpc-endpoint" @@ -136,8 +132,6 @@ services: - /risingwave.toml - "--prometheus-listener-addr" - "0.0.0.0:2222" - - "--metrics-level" - - "info" expose: - "4566" ports: @@ -198,7 +192,7 @@ services: - "--connector-rpc-endpoint" - "connector-node:50051" - "--state-store" - - "hummock+hdfs://@" + - "hummock+hdfs://" - "--data-directory" - "hummock_001" - "--config-path" diff --git a/docker/docker-compose-with-oss.yml b/docker/docker-compose-with-oss.yml index edc51d81d3b5b..67729861815e7 100644 --- a/docker/docker-compose-with-oss.yml +++ b/docker/docker-compose-with-oss.yml @@ -50,7 +50,7 @@ services: - "--connector-rpc-endpoint" - "connector-node:50051" - "--state-store" - - "hummock+oss://@" + - "hummock+oss://" - "--data-directory" - "hummock_001" - "--config-path" diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 107124d5c27b5..4dbd5fe5bb28d 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -2,7 +2,7 @@ version: "3" services: compactor-0: - image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.0.0}" + image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.3.0}" command: - compactor-node - "--listen-addr" @@ -11,8 +11,6 @@ services: - 
"compactor-0:6660" - "--prometheus-listener-addr" - "0.0.0.0:1260" - - "--metrics-level" - - "info" - "--meta-address" - "http://meta-node-0:5690" - "--config-path" @@ -39,7 +37,7 @@ services: timeout: 5s retries: 5 compute-node-0: - image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.0.0}" + image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.3.0}" command: - compute-node - "--listen-addr" @@ -48,8 +46,6 @@ services: - "compute-node-0:5688" - "--prometheus-listener-addr" - "0.0.0.0:1222" - - "--metrics-level" - - "info" - "--meta-address" - "http://meta-node-0:5690" - "--connector-rpc-endpoint" @@ -126,7 +122,7 @@ services: timeout: 5s retries: 5 frontend-node-0: - image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.0.0}" + image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.3.0}" command: - frontend-node - "--listen-addr" @@ -139,8 +135,6 @@ services: - /risingwave.toml - "--prometheus-listener-addr" - "0.0.0.0:2222" - - "--metrics-level" - - "info" expose: - "4566" ports: @@ -185,7 +179,7 @@ services: timeout: 5s retries: 5 meta-node-0: - image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.0.0}" + image: "ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.3.0}" command: - meta-node - "--listen-addr" @@ -266,6 +260,7 @@ services: MINIO_PROMETHEUS_URL: "http://prometheus-0:9500" MINIO_ROOT_PASSWORD: hummockadmin MINIO_ROOT_USER: hummockadmin + MINIO_DOMAIN: "minio-0" container_name: minio-0 healthcheck: test: @@ -301,7 +296,7 @@ services: timeout: 5s retries: 5 connector-node: - image: ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.0.0} + image: ghcr.io/risingwavelabs/risingwave:${RW_IMAGE_VERSION:-v1.3.0} entrypoint: "/risingwave/bin/connector-node/start-service.sh" ports: - 50051 diff --git a/docs/developer-guide.md b/docs/developer-guide.md index e256d1a5ebd83..7d072e7da2e44 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -2,7 +2,7 @@ This guide is intended to 
be used by contributors to learn about how to develop RisingWave. The instructions about how to submit code changes are included in [contributing guidelines](../CONTRIBUTING.md). -If you have questions, you can search for existing discussions or start a new discussion in the [Discussions forum of RisingWave](https://github.com/risingwavelabs/risingwave/discussions), or ask in the RisingWave Community channel on Slack. Please use the [invitation link](https://join.slack.com/t/risingwave-community/shared_invite/zt-120rft0mr-d8uGk3d~NZiZAQWPnElOfw) to join the channel. +If you have questions, you can search for existing discussions or start a new discussion in the [Discussions forum of RisingWave](https://github.com/risingwavelabs/risingwave/discussions), or ask in the RisingWave Community channel on Slack. Please use the [invitation link](https://risingwave.com/slack) to join the channel. To report bugs, create a [GitHub issue](https://github.com/risingwavelabs/risingwave/issues/new/choose). @@ -23,9 +23,8 @@ http://ecotrust-canada.github.io/markdown-toc/ * [Start the playground with RiseDev](#start-the-playground-with-risedev) * [Start the playground with cargo](#start-the-playground-with-cargo) - [Debug playground using vscode](#debug-playground-using-vscode) +- [Use standalone-mode](#use-standalone-mode) - [Develop the dashboard](#develop-the-dashboard) - * [Dashboard v1](#dashboard-v1) - * [Dashboard v2](#dashboard-v2) - [Observability components](#observability-components) * [Cluster Control](#cluster-control) * [Monitoring](#monitoring) @@ -61,7 +60,7 @@ You can also read the [crate level documentation](https://risingwavelabs.github. - The `docker` folder contains Docker files to build and start RisingWave. - The `e2e_test` folder contains the latest end-to-end test cases. - The `docs` folder contains the design docs. If you want to learn about how RisingWave is designed and implemented, check out the design docs here. 
-- The `dashboard` folder contains RisingWave dashboard v2. +- The `dashboard` folder contains RisingWave dashboard. The [src/README.md](../src/README.md) file contains more details about Design Patterns in RisingWave. @@ -86,7 +85,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh To install the dependencies on Debian-based Linux systems, run: ```shell -sudo apt install make build-essential cmake protobuf-compiler curl postgresql-client tmux lld pkg-config libssl-dev +sudo apt install make build-essential cmake protobuf-compiler curl postgresql-client tmux lld pkg-config libssl-dev libsasl2-dev curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ``` @@ -155,7 +154,6 @@ For example, you can modify the default section to: default: - use: minio - use: meta-node - enable-dashboard-v2: false - use: compute-node - use: frontend - use: prometheus @@ -209,25 +207,17 @@ psql -h localhost -p 4566 -d dev -U root To step through risingwave locally with a debugger you can use the `launch.json` and the `tasks.json` provided in `vscode_suggestions`. After adding these files to your local `.vscode` folder you can debug and set breakpoints by launching `Launch 'risingwave p' debug`. +## Use standalone-mode + +Please refer to [README](../src/cmd_all/src/README.md) for more details. + ## Develop the dashboard Currently, RisingWave has two versions of dashboards. You can use RiseDev config to select which version to use. The dashboard will be available at `http://127.0.0.1:5691/` on meta node. -### Dashboard v1 - -Dashboard v1 is a single HTML page. To preview and develop this version, install Node.js, and run this command: - -```shell -cd src/meta/src/dashboard && npx reload -b -``` - -Dashboard v1 is bundled by default along with meta node. When the cluster is started, you may use the dashboard without any configuration. - -### Dashboard v2 - -The development instructions for dashboard v2 are available [here](../dashboard/README.md). 
+The development instructions for dashboard are available [here](../dashboard/README.md). ## Observability components diff --git a/docs/memory-profiling.md b/docs/memory-profiling.md index fec324770f0c8..759807ba1e9ed 100644 --- a/docs/memory-profiling.md +++ b/docs/memory-profiling.md @@ -158,7 +158,7 @@ cp ./target/release/examples/addr2line Find a Linux machine and use `docker` command to start an environment with the specific RisingWave version. Here, `-v $(pwd):/dumps` mounts current directory to `/dumps` folder inside the container, so that you don't need to copy the files in and out. ```bash -docker run -it --rm --entrypoint /bin/bash -v $(pwd):/dumps ghcr.io/risingwavelabs/risingwave:v1.0.0 +docker run -it --rm --entrypoint /bin/bash -v $(pwd):/dumps ghcr.io/risingwavelabs/risingwave:latest ``` diff --git a/e2e_test/background_ddl/basic.slt b/e2e_test/background_ddl/basic.slt new file mode 100644 index 0000000000000..d0fee4917b565 --- /dev/null +++ b/e2e_test/background_ddl/basic.slt @@ -0,0 +1,72 @@ +statement ok +SET BACKGROUND_DDL=true; + +statement ok +ALTER SYSTEM SET max_concurrent_creating_streaming_jobs TO 4; + +statement ok +CREATE TABLE t (v1 int); + +statement ok +INSERT INTO t select * from generate_series(1, 200000); + +statement ok +FLUSH; + +statement ok +CREATE MATERIALIZED VIEW m1 as SELECT * FROM t; + +statement ok +CREATE MATERIALIZED VIEW m2 as SELECT * FROM t; + +statement ok +CREATE MATERIALIZED VIEW m3 as SELECT * FROM t; + +# Disable the flaky check: +# query I +# select count(*) from rw_catalog.rw_ddl_progress; +# ---- +# 3 + +statement error +SELECT * FROM m1; + +# Meta should always reject duplicate mview. 
+statement error +CREATE MATERIALIZED VIEW m3 as SELECT * FROM t; + +# Wait for background ddl to finish +sleep 30s + +query I +select count(*) from m1; +---- +200000 + +query I +select count(*) from m2; +---- +200000 + +query I +select count(*) from m3; +---- +200000 + +statement ok +DROP MATERIALIZED VIEW m1; + +statement ok +DROP MATERIALIZED VIEW m2; + +statement ok +DROP MATERIALIZED VIEW m3; + +statement ok +DROP TABLE t; + +statement ok +SET BACKGROUND_DDL=false; + +statement ok +ALTER SYSTEM SET max_concurrent_creating_streaming_jobs TO 1; diff --git a/e2e_test/background_ddl/common/create_bg_mv.slt b/e2e_test/background_ddl/common/create_bg_mv.slt new file mode 100644 index 0000000000000..0897f1aa15754 --- /dev/null +++ b/e2e_test/background_ddl/common/create_bg_mv.slt @@ -0,0 +1,5 @@ +statement ok +SET BACKGROUND_DDL=true; + +statement ok +CREATE MATERIALIZED VIEW m1 as SELECT * FROM t; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/create_fg_mv.slt b/e2e_test/background_ddl/common/create_fg_mv.slt new file mode 100644 index 0000000000000..b7c664d9df0d7 --- /dev/null +++ b/e2e_test/background_ddl/common/create_fg_mv.slt @@ -0,0 +1,2 @@ +statement ok +CREATE MATERIALIZED VIEW m1 as SELECT * FROM t; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/create_index.slt b/e2e_test/background_ddl/common/create_index.slt new file mode 100644 index 0000000000000..95511a1c39866 --- /dev/null +++ b/e2e_test/background_ddl/common/create_index.slt @@ -0,0 +1,2 @@ +statement ok +CREATE INDEX i on t (v1); diff --git a/e2e_test/background_ddl/common/create_sink.slt b/e2e_test/background_ddl/common/create_sink.slt new file mode 100644 index 0000000000000..2f71eab5e5fdf --- /dev/null +++ b/e2e_test/background_ddl/common/create_sink.slt @@ -0,0 +1,2 @@ +statement ok +CREATE SINK i FROM t WITH (connector='blackhole'); diff --git a/e2e_test/background_ddl/common/create_table.slt b/e2e_test/background_ddl/common/create_table.slt 
new file mode 100644 index 0000000000000..b657041fd3ab9 --- /dev/null +++ b/e2e_test/background_ddl/common/create_table.slt @@ -0,0 +1,8 @@ +statement ok +CREATE TABLE t(v1 int); + +statement ok +INSERT INTO t SELECT * FROM generate_series(1, 500000); + +statement ok +FLUSH; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/drop_index.slt b/e2e_test/background_ddl/common/drop_index.slt new file mode 100644 index 0000000000000..a92487e239416 --- /dev/null +++ b/e2e_test/background_ddl/common/drop_index.slt @@ -0,0 +1,2 @@ +statement ok +DROP INDEX i; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/drop_mv.slt b/e2e_test/background_ddl/common/drop_mv.slt new file mode 100644 index 0000000000000..73792bf077ffb --- /dev/null +++ b/e2e_test/background_ddl/common/drop_mv.slt @@ -0,0 +1,2 @@ +statement ok +DROP MATERIALIZED VIEW m1; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/drop_sink.slt b/e2e_test/background_ddl/common/drop_sink.slt new file mode 100644 index 0000000000000..04b99f0e02cb5 --- /dev/null +++ b/e2e_test/background_ddl/common/drop_sink.slt @@ -0,0 +1,2 @@ +statement ok +DROP SINK i; \ No newline at end of file diff --git a/e2e_test/background_ddl/common/drop_table.slt b/e2e_test/background_ddl/common/drop_table.slt new file mode 100644 index 0000000000000..eb37883178ef9 --- /dev/null +++ b/e2e_test/background_ddl/common/drop_table.slt @@ -0,0 +1,2 @@ +statement ok +DROP TABLE t; diff --git a/e2e_test/background_ddl/common/validate_backfilled_mv.slt b/e2e_test/background_ddl/common/validate_backfilled_mv.slt new file mode 100644 index 0000000000000..3b0e427ac454f --- /dev/null +++ b/e2e_test/background_ddl/common/validate_backfilled_mv.slt @@ -0,0 +1,4 @@ +query I +SELECT count(*) FROM m1; +---- +500000 \ No newline at end of file diff --git a/e2e_test/background_ddl/common/validate_no_jobs.slt b/e2e_test/background_ddl/common/validate_no_jobs.slt new file mode 100644 index 
0000000000000..7833b205092ca --- /dev/null +++ b/e2e_test/background_ddl/common/validate_no_jobs.slt @@ -0,0 +1,4 @@ +query I +select count(*) from rw_catalog.rw_ddl_progress; +---- +0 \ No newline at end of file diff --git a/e2e_test/background_ddl/common/validate_one_job.slt b/e2e_test/background_ddl/common/validate_one_job.slt new file mode 100644 index 0000000000000..13c41f7612ab0 --- /dev/null +++ b/e2e_test/background_ddl/common/validate_one_job.slt @@ -0,0 +1,4 @@ +query I +select count(*) from rw_catalog.rw_ddl_progress; +---- +1 \ No newline at end of file diff --git a/e2e_test/background_ddl/sim.slt b/e2e_test/background_ddl/sim.slt new file mode 100644 index 0000000000000..3462716a17aef --- /dev/null +++ b/e2e_test/background_ddl/sim.slt @@ -0,0 +1 @@ +include ./sim/basic.slt \ No newline at end of file diff --git a/e2e_test/background_ddl/sim/basic.slt b/e2e_test/background_ddl/sim/basic.slt new file mode 100644 index 0000000000000..b58dd36604c06 --- /dev/null +++ b/e2e_test/background_ddl/sim/basic.slt @@ -0,0 +1,63 @@ +statement ok +SET BACKGROUND_DDL=true; + +statement ok +ALTER SYSTEM SET max_concurrent_creating_streaming_jobs TO 4; + +statement ok +CREATE TABLE t (v1 int); + +statement ok +INSERT INTO t select * from generate_series(1, 200000); + +statement ok +FLUSH; + +statement ok +CREATE MATERIALIZED VIEW m1 as SELECT * FROM t; + +statement ok +CREATE MATERIALIZED VIEW m2 as SELECT * FROM t; + +statement ok +CREATE MATERIALIZED VIEW m3 as SELECT * FROM t; + +# Meta should always reject duplicate mview. 
+statement error +CREATE MATERIALIZED VIEW m3 as SELECT * FROM t; + +# Wait for background ddl to finish +sleep 30s + +query I +select count(*) from m1; +---- +10000000 + +query I +select count(*) from m2; +---- +10000000 + +query I +select count(*) from m3; +---- +10000000 + +statement ok +DROP MATERIALIZED VIEW m1; + +statement ok +DROP MATERIALIZED VIEW m2; + +statement ok +DROP MATERIALIZED VIEW m3; + +statement ok +DROP TABLE t; + +statement ok +SET BACKGROUND_DDL=false; + +statement ok +ALTER SYSTEM SET max_concurrent_creating_streaming_jobs TO 1; diff --git a/e2e_test/batch/aggregate/with_over_window.slt.part b/e2e_test/batch/aggregate/with_over_window.slt.part new file mode 100644 index 0000000000000..a450b13837791 --- /dev/null +++ b/e2e_test/batch/aggregate/with_over_window.slt.part @@ -0,0 +1,87 @@ +statement ok +create table t (a int, b int, c int, d int, e int); + +statement ok +insert into t values + (1, 23, 84, 11, 87), + (2, 34, 29, 22, 98), + (3, 45, 43, 33, 10), + (4, 56, 83, 44, 26), + (5, 68, 20, 55, 12), + (5, 68, 90, 66, 34), + (5, 68, 11, 77, 32); + +query II +select + a, + sum((sum(b))) over (partition by a order by a) +from t +group by a +order by a; +---- +1 23 +2 34 +3 45 +4 56 +5 204 + +query II +select + a, + row_number() over (partition by a order by a) +from t +group by a +order by a; +---- +1 1 +2 1 +3 1 +4 1 +5 1 + +query II +select + a, + row_number() over (partition by a order by a desc) +from t +group by a +order by a; +---- +1 1 +2 1 +3 1 +4 1 +5 1 + +query III +select + a, + b, + sum(sum(c)) over (partition by a order by b) +from t +group by a, b +order by a, b; +---- +1 23 84 +2 34 29 +3 45 43 +4 56 83 +5 68 121 + +query III +select + a, + b, + sum(sum(c)) over (partition by a, avg(d) order by max(e), b) +from t +group by a, b +order by a, b; +---- +1 23 84 +2 34 29 +3 45 43 +4 56 83 +5 68 121 + +statement ok +drop table t; diff --git a/e2e_test/batch/basic/array.slt.part b/e2e_test/batch/basic/array.slt.part index 
329bd7b05074e..3229118b6d001 100644 --- a/e2e_test/batch/basic/array.slt.part +++ b/e2e_test/batch/basic/array.slt.part @@ -134,8 +134,8 @@ double precision[] query T select pg_typeof(arr::varchar[]) from (values ('{1, 2, 3}'), ('{4, 5, 6}')) as t(arr); ---- -varchar[] -varchar[] +character varying[] +character varying[] # Test explicit cast to nested integer array query T diff --git a/e2e_test/batch/basic/cte.slt.part b/e2e_test/batch/basic/cte.slt.part index 8d1858ac0cd38..7bdd3407baa68 100644 --- a/e2e_test/batch/basic/cte.slt.part +++ b/e2e_test/batch/basic/cte.slt.part @@ -65,3 +65,14 @@ drop table t1; statement ok drop table t2; + +# more tests for alias https://github.com/risingwavelabs/risingwave/issues/12526 +query I +with cte as (select 1) select x from cte t(x); +---- +1 + +query I +with cte(a) as (select 1,2) select x,y from cte t(x,y); +---- +1 2 diff --git a/e2e_test/batch/basic/func.slt.part b/e2e_test/batch/basic/func.slt.part index 31ae11c1226d1..ebcce5ceb6a47 100644 --- a/e2e_test/batch/basic/func.slt.part +++ b/e2e_test/batch/basic/func.slt.part @@ -261,7 +261,7 @@ unknown query T select pg_typeof('123'::varchar); ---- -varchar +character varying query T select pg_typeof('123'::int); @@ -276,7 +276,7 @@ unknown query T select pg_typeof(null::varchar); ---- -varchar +character varying query T select pg_typeof(null::bool); @@ -392,6 +392,30 @@ select regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); ---- {abc,01234,xyz} +query T +select regexp_match(string, 'a') +from (values + ('abc'), + ('def'), + ('ghi') +) t(string); +---- +{a} +NULL +NULL + +query T +select regexp_match(string, pattern, flags) +from (values + ('abc', 'bc', ''), + ('abc', 'Bc', ''), + ('abc', 'Bc', 'i') +) t(string, pattern, flags); +---- +{bc} +NULL +{bc} + query T select regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); ---- @@ -504,10 +528,10 @@ select regexp_replace('abc123', 'abc', 'prefix\&suffix'); ---- prefixabcsuffix123 -query error invalid 
syntax for `regexp_replace` +query error invalid digit found in string select regexp_replace('foobarbaz', 'b..', 'X', 1, 'g'); -query error invalid parameters specified in regexp_replace +query error invalid digit found in string select regexp_replace('foobarbaz', 'b..', 'X', 'g', 1); # With Unicode @@ -546,6 +570,30 @@ select regexp_replace('💩💩💩💩💩foo🤔️bar亲爱的😭baz这不 ---- 💩💩💩💩💩foo🤔️bar亲爱的😭这是🥵爱情❤️‍🔥 +# Positive Lookahead +query T +select regexp_replace('foobarbaz', 'a(?=r)', 'X'); +---- +foobXrbaz + +# Negative Lookahead +query T +select regexp_replace('chocolate', 'o(?!c)', 'X'); +---- +chocXlate + +# Positive Lookbehind +query T +select regexp_replace('foobarXaz', '(?<=X)a', 'X'); +---- +foobarXXz + +# Negative Lookbehind +query T +select regexp_replace('foobarXaz', '(?= date '2021-01-01' as later_than_2021 from t2; +---- +1 t +2 t + +# `now()` filled for historical data should be the same +query II +select max(v1), count(*) from t2 group by v3 order by v3; +---- +2 2 -statement error db error: ERROR: QueryError: Bind error: impure default expr is not supported. 
-alter table t2 add column v3 timestamptz default now(); +statement ok +flush; + +statement ok +insert into t2 values (3); + +# Newly inserted record should have a later timestamp +query II +select max(v1), count(*) from t2 group by v3 order by v3; +---- +2 2 +3 1 + +# Historical data can be correctly updated +statement ok +update t2 set v3 = '2000-01-01 00:00:00+00:00' where v1 = 1; + +query II +select max(v1), count(*) from t2 group by v3 order by v3; +---- +1 1 +2 1 +3 1 statement ok drop table t1; diff --git a/e2e_test/batch/basic/unnest.slt.part b/e2e_test/batch/basic/unnest.slt.part index efcf9981e65c9..0807637c88e2d 100644 --- a/e2e_test/batch/basic/unnest.slt.part +++ b/e2e_test/batch/basic/unnest.slt.part @@ -83,3 +83,38 @@ select distinct unnest(array[1,1,2,3,1]) as x; 1 2 3 + +query I +select * from unnest(array[0,1,2]) with ordinality; +---- +0 1 +1 2 +2 3 + +query I +select * from unnest(array[0,1,2]) with ordinality, unnest(array[3,4]) with ordinality as unnest_2; +---- +0 1 3 1 +0 1 4 2 +1 2 3 1 +1 2 4 2 +2 3 3 1 +2 3 4 2 + +statement ok +create table t(arr varchar[]); + +statement ok +insert into t values (Array['a','b', 'c']), (Array['d','e']); + +query I rowsort +select * from t cross join unnest(t.arr) WITH ORDINALITY AS x(elts, num); +---- +{a,b,c} a 1 +{a,b,c} b 2 +{a,b,c} c 3 +{d,e} d 1 +{d,e} e 2 + +statement ok +drop table t; diff --git a/e2e_test/batch/catalog/metabase.slt.part b/e2e_test/batch/catalog/metabase.slt.part new file mode 100644 index 0000000000000..0172930f6e5f2 --- /dev/null +++ b/e2e_test/batch/catalog/metabase.slt.part @@ -0,0 +1,24 @@ +query +SELECT + NULL AS TABLE_CAT, + n.nspname AS TABLE_SCHEM, + ct.relname AS TABLE_NAME, + a.attname AS COLUMN_NAME, + ( + information_schema._pg_expandarray(i.indkey) + ).n AS KEY_SEQ, + ci.relname AS PK_NAME, + information_schema._pg_expandarray(i.indkey) AS KEYS, + a.attnum AS A_ATTNUM +FROM + pg_catalog.pg_class ct + JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) + JOIN 
pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) + JOIN pg_catalog.pg_index i ON (a.attrelid = i.indrelid) + JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) +WHERE + true + AND n.nspname = 'public' + AND ct.relname = 'sentences' + AND i.indisprimary +---- diff --git a/e2e_test/batch/catalog/pg_size.slt.part b/e2e_test/batch/catalog/pg_size.slt.part index 2dab96a689642..c274b7f297ee3 100644 --- a/e2e_test/batch/catalog/pg_size.slt.part +++ b/e2e_test/batch/catalog/pg_size.slt.part @@ -4,7 +4,7 @@ create table t (v1 int); statement ok insert into t values (3); -sleep 30s +sleep 3s skipif in-memory query T @@ -52,7 +52,7 @@ t statement ok create index t_idx on t (v1); -sleep 10s +sleep 3s skipif in-memory query T diff --git a/e2e_test/batch/catalog/pg_type.slt.part b/e2e_test/batch/catalog/pg_type.slt.part index e7a19d1566c4e..e35d21291bc14 100644 --- a/e2e_test/batch/catalog/pg_type.slt.part +++ b/e2e_test/batch/catalog/pg_type.slt.part @@ -1,20 +1,20 @@ query ITITT -SELECT oid, typname, typelem, typnotnull, typtype FROM pg_catalog.pg_type order by oid; +SELECT oid, typname, typelem, typnotnull, typtype, typinput FROM pg_catalog.pg_type order by oid; ---- -16 bool 0 f b -17 bytea 0 f b -20 int8 0 f b -21 int2 0 f b -23 int4 0 f b -25 text 0 f b -700 float4 0 f b -701 float8 0 f b -1043 varchar 0 f b -1082 date 0 f b -1083 time 0 f b -1114 timestamp 0 f b -1184 timestamptz 0 f b -1186 interval 0 f b -1301 rw_int256 0 f b -1700 numeric 0 f b -3802 jsonb 0 f b +16 bool 0 f b boolin +17 bytea 0 f b byteain +20 int8 0 f b int8in +21 int2 0 f b int2in +23 int4 0 f b int4in +25 text 0 f b textin +700 float4 0 f b float4in +701 float8 0 f b float8in +1043 varchar 0 f b varcharin +1082 date 0 f b date_in +1083 time 0 f b time_in +1114 timestamp 0 f b timestamp_in +1184 timestamptz 0 f b timestamptz_in +1186 interval 0 f b interval_in +1301 rw_int256 0 f b rw_int256_in +1700 numeric 0 f b numeric_in +3802 jsonb 0 f b jsonb_in diff --git 
a/e2e_test/batch/duckdb/all.slt.part b/e2e_test/batch/duckdb/all.slt.part index 81ed190749455..68adce73dea24 100644 --- a/e2e_test/batch/duckdb/all.slt.part +++ b/e2e_test/batch/duckdb/all.slt.part @@ -6,3 +6,4 @@ include ./conjunction/*.slt.part include ./conjunction/*/*.slt.part include ./limit/*.slt.part include ./select/*.slt.part +include ./cte/*.slt.part diff --git a/e2e_test/batch/duckdb/cte/insert_cte_bug_3417.test.slt.part b/e2e_test/batch/duckdb/cte/insert_cte_bug_3417.test.slt.part new file mode 100644 index 0000000000000..d4a6a955caf69 --- /dev/null +++ b/e2e_test/batch/duckdb/cte/insert_cte_bug_3417.test.slt.part @@ -0,0 +1,18 @@ +# name: test/sql/cte/insert_cte_bug_3417.test +# description: Test for a crash reported in issue #3417 +# group: [cte] + +statement ok +CREATE TABLE table1 (id INTEGER, a INTEGER); + +statement ok +CREATE TABLE table2 (table1_id INTEGER); + +statement error +INSERT INTO table2 WITH cte AS (INSERT INTO table1 SELECT 1, 2 RETURNING id) SELECT id FROM cte; + +statement ok +DROP TABLE table1; + +statement ok +DROP TABLE table2; diff --git a/e2e_test/batch/duckdb/cte/test_bug_922.test.slt.part b/e2e_test/batch/duckdb/cte/test_bug_922.test.slt.part new file mode 100644 index 0000000000000..371fc7b5b8ef0 --- /dev/null +++ b/e2e_test/batch/duckdb/cte/test_bug_922.test.slt.part @@ -0,0 +1,8 @@ +# name: test/sql/cte/test_bug_922.test +# description: Test for a crash reported in issue #922 +# group: [cte] + +query I +WITH my_list(value) AS (VALUES (1), (2), (3)) + SELECT * FROM my_list LIMIT 0 OFFSET 1 +---- diff --git a/e2e_test/batch/duckdb/cte/test_cte.test.slt.part b/e2e_test/batch/duckdb/cte/test_cte.test.slt.part new file mode 100644 index 0000000000000..4e073e8ebcacb --- /dev/null +++ b/e2e_test/batch/duckdb/cte/test_cte.test.slt.part @@ -0,0 +1,145 @@ +# name: test/sql/cte/test_cte.test +# description: Test Common Table Expressions (CTE) +# group: [cte] + +statement ok +SET RW_IMPLICIT_FLUSH TO TRUE; + +statement ok +create 
table a(i integer); + +statement ok +insert into a values (42); + +query I +with cte1 as (Select i as j from a) select * from cte1; +---- +42 + +query I +with cte1 as (Select i as j from a) select x from cte1 t1(x); +---- +42 + +query I +with cte1(xxx) as (Select i as j from a) select xxx from cte1; +---- +42 + +query I +with cte1(xxx) as (Select i as j from a) select x from cte1 t1(x); +---- +42 + +query II +with cte1 as (Select i as j from a), cte2 as (select ref.j as k from cte1 as ref), cte3 as (select ref2.j+1 as i from cte1 as ref2) select * from cte2 , cte3; +---- +42 43 + +query I rowsort +with cte1 as (select i as j from a), cte2 as (select ref.j as k from cte1 as ref), cte3 as (select ref2.j+1 as i from cte1 as ref2) select * from cte2 union all select * FROM cte3; +---- +42 +43 + + +# FIXME: this should be an error +# duplicate CTE alias +query I +with cte1 as (select 42), cte1 as (select 43) select * FROM cte1; +---- +43 + +# reference to CTE before its actually defined +# duckdb is ok +# postgres: query failed: db error: ERROR: relation "cte1" does not exist +# DETAIL: There is a WITH item named "cte1", but it cannot be referenced from this part of the query. +# HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references. 
+query error table or source not found: cte1 +with cte3 as (select ref2.j as i from cte1 as ref2), cte1 as (Select i as j from a), cte2 as (select ref.j+1 as k from cte1 as ref) select * from cte2 union all select * FROM cte3; + + +# multiple uses of same CTE +query II +with cte1 as (Select i as j from a) select * from cte1 cte11, cte1 cte12; +---- +42 42 + +# refer to CTE in subquery +query I +with cte1 as (Select i as j from a) select * from cte1 where j = (select max(j) from cte1 as cte2); +---- +42 + +# multi-column name alias +query II +with cte1(x, y) as (select 42 a, 84 b) select zzz, y from cte1 t1(zzz); +---- +42 84 + +# use a CTE in a view definition +statement ok +create view va AS (with cte as (Select i as j from a) select * from cte); + +query I +select * from va +---- +42 + +# nested CTE views that re-use CTE aliases +query I +with cte AS (SELECT * FROM va) SELECT * FROM cte; +---- +42 + +# multiple ctes in a view definition +statement ok +create view vb AS (with cte1 as (Select i as j from a), cte2 as (select ref.j+1 as k from cte1 as ref) select * from cte2); + +query I +select * from vb +---- +43 + +# cte in set operation node +query I +SELECT 1 UNION ALL (WITH cte AS (SELECT 42) SELECT * FROM cte); +---- +1 +42 + +# # cte in recursive cte +# query I +# WITH RECURSIVE cte(d) AS ( +# SELECT 1 +# UNION ALL +# (WITH c(d) AS (SELECT * FROM cte) +# SELECT d + 1 +# FROM c +# WHERE FALSE +# ) +# ) +# SELECT max(d) FROM cte; +# ---- +# 1 + +# test CTE with nested aliases in where clause +# Note: postgres doesn't support this: column "alias1" does not exist +query error failed to bind expression: alias1 +with cte (a) as ( + select 1 +) +select + a as alias1, + alias1 as alias2 +from cte +where alias2 > 0; + +statement ok +drop view vb; + +statement ok +drop view va; + +statement ok +drop table a; diff --git a/e2e_test/batch/duckdb/cte/test_cte_in_cte.test.slt.part b/e2e_test/batch/duckdb/cte/test_cte_in_cte.test.slt.part new file mode 100644 index 
0000000000000..08c31f2b40f75 --- /dev/null +++ b/e2e_test/batch/duckdb/cte/test_cte_in_cte.test.slt.part @@ -0,0 +1,57 @@ +# name: test/sql/cte/test_cte_in_cte.test +# description: Test Nested Common Table Expressions (CTE) +# group: [cte] + +statement ok +SET RW_IMPLICIT_FLUSH TO TRUE; + +statement ok +create table a(i integer); + +statement ok +insert into a values (42); + +query I +with cte1 as (Select i as j from a) select * from cte1; +---- +42 + +query I +with cte1 as (with b as (Select i as j from a) Select j from b) select x from cte1 t1(x); +---- +42 + +query I +with cte1(xxx) as (with ncte(yyy) as (Select i as j from a) Select yyy from ncte) select xxx from cte1; +---- +42 + +query II +with cte1 as (with b as (Select i as j from a) select j from b), cte2 as (with c as (select ref.j+1 as k from cte1 as ref) select k from c) select * from cte1 , cte2; +---- +42 43 + +# refer to CTE in subquery tableref +query I +with cte1 as (Select i as j from a) select * from (with cte2 as (select max(j) as j from cte1) select * from cte2) f +---- +42 + +# refer to CTE in subquery expression +query I +with cte1 as (Select i as j from a) select * from cte1 where j = (with cte2 as (select max(j) as j from cte1) select j from cte2); +---- +42 + +# refer to same-named CTE in a subquery expression +query I +with cte as (Select i as j from a) select * from cte where j = (with cte as (select max(j) as j from cte) select j from cte); +---- +42 + +# self-refer to non-existent cte +statement error +with cte as (select * from cte) select * from cte + +statement ok +drop table a; diff --git a/e2e_test/batch/duckdb/cte/test_cte_overflow.test.slt.part b/e2e_test/batch/duckdb/cte/test_cte_overflow.test.slt.part new file mode 100644 index 0000000000000..4d27a9c7a3dfe --- /dev/null +++ b/e2e_test/batch/duckdb/cte/test_cte_overflow.test.slt.part @@ -0,0 +1,26 @@ +# name: test/sql/cte/test_cte_overflow.test +# description: Ensure no stack overflow for CTE names that match existing tables +# 
group: [cte] + +statement ok +SET RW_IMPLICIT_FLUSH TO TRUE; + +statement ok +create table a (id integer) + +statement ok +insert into a values (1729) + +statement ok +create view va as (with v as (select * from a) select * from v) + +query I +with a as (select * from va) select * from a +---- +1729 + +statement ok +drop view va; + +statement ok +drop table a; diff --git a/e2e_test/batch/duckdb/cte/test_issue_5673.test.slt.part b/e2e_test/batch/duckdb/cte/test_issue_5673.test.slt.part new file mode 100644 index 0000000000000..6fa4c3d8c23d6 --- /dev/null +++ b/e2e_test/batch/duckdb/cte/test_issue_5673.test.slt.part @@ -0,0 +1,54 @@ +# name: test/sql/cte/test_issue_5673.test +# description: Issue #5673 and #4987: CTE and Table name are name shadowing +# group: [cte] + +statement ok +SET RW_IMPLICIT_FLUSH TO TRUE; + +statement ok +create table orders(ordered_at int); + +statement ok +create table stg_orders(ordered_at int); + +statement ok +insert into orders values (1); + +statement ok +insert into stg_orders values (1); + +# Note: postgres succeeds. +# duckdb returns Binder Error: Circular reference to CTE "orders", There are two possible solutions. 
+query ok +with +orders as ( + select * from stg_orders + where ordered_at >= (select max(ordered_at) from orders) +), +some_more_logic as ( + select * + from orders +) +select * from some_more_logic; +---- +1 + +query I +with +orders as ( + select * from public.stg_orders + where ordered_at >= (select max(ordered_at) from public.orders) +), +some_more_logic as ( + select * + from orders +) +select * from some_more_logic; +---- +1 + +statement ok +drop table orders; + +statement ok +drop table stg_orders; diff --git a/e2e_test/batch/functions/array_sum.slt.part b/e2e_test/batch/functions/array_sum.slt.part new file mode 100644 index 0000000000000..11c126ef6a0d1 --- /dev/null +++ b/e2e_test/batch/functions/array_sum.slt.part @@ -0,0 +1,46 @@ +query I +select array_sum(array[1, 2, 3]); +---- +6 + +# Testing for SMALLINT with positive numbers +query I +select array_sum(array[10, 20, 30]::smallint[]); +---- +60 + +# Testing for SMALLINT with a mix of positive and negative numbers +query I +select array_sum(array[-10, 20, -30]::smallint[]); +---- +-20 + +# Testing for SMALLINT with all zeros +query I +select array_sum(array[0, 0, 0]::smallint[]); +---- +0 + +# Testing for INT with larger positive numbers +query I +select array_sum(array[1000, 2000, 3000]::int[]); +---- +6000 + +# Testing for INT with a mix of larger positive and negative numbers +query I +select array_sum(array[-1000, 2000, -3000]::int[]); +---- +-2000 + +# Testing for BIGINT with much larger numbers +query I +select array_sum(array[1000000000, 2000000000, 3000000000]::bigint[]); +---- +6000000000 + +# Testing for BIGINT with a mix of much larger positive and negative numbers +query I +select array_sum(array[-1000000000, 2000000000, -3000000000]::bigint[]); +---- +-2000000000 diff --git a/e2e_test/batch/functions/cast.slt.part b/e2e_test/batch/functions/cast.slt.part new file mode 100644 index 0000000000000..a0c8b31eec83d --- /dev/null +++ b/e2e_test/batch/functions/cast.slt.part @@ -0,0 +1,40 @@ 
+statement ok +create table dt (a date, b varchar); + +statement ok +insert into dt values (date('2020-01-23'), '1990-12-19'); + +query TT +select + date(b) +from +dt +except +select + cast(b as date) +from dt +except +select + b::date +from dt; +---- + +statement ok +drop table dt; + +query T +select date('2030-03-30'); +---- +2030-03-30 + +query error +select date('2000-13-03'); + +query error +select date('00-00-33'); + +query error cannot cast type "integer" to "date" in Explicit context +select date(1); + +query error unexpected arguments number +select date(); diff --git a/e2e_test/batch/functions/format.slt.part b/e2e_test/batch/functions/format.slt.part index 92b4fc1553a65..ab6090737e304 100644 --- a/e2e_test/batch/functions/format.slt.part +++ b/e2e_test/batch/functions/format.slt.part @@ -7,3 +7,26 @@ query T SELECT format('Testing %s, %s, %s, %%', 'one', 'two', 'three'); ---- Testing one, two, three, % + +query T +SELECT format('%s %s', a, b) from (values + ('Hello', 'World'), + ('Rising', 'Wave') +) as t(a, b); +---- +Hello World +Rising Wave + +query T +SELECT format(f, a, b) from (values + ('%s %s', 'Hello', 'World'), + ('%s%s', 'Hello', null), + (null, 'Hello', 'World') +) as t(f, a, b); +---- +Hello World +Hello +NULL + +query error too few arguments for format() +SELECT format('%s %s', 'Hello'); diff --git a/e2e_test/batch/functions/format_type.slt.part b/e2e_test/batch/functions/format_type.slt.part index 2f762e7fe7750..2e4079a3ea5b9 100644 --- a/e2e_test/batch/functions/format_type.slt.part +++ b/e2e_test/batch/functions/format_type.slt.part @@ -17,3 +17,8 @@ query T SELECT format_type(NULL, 0); ---- NULL + +query T +SELECT format_type(1043, '-1'); +---- +character varying diff --git a/e2e_test/batch/functions/greatest_least.slt.part b/e2e_test/batch/functions/greatest_least.slt.part new file mode 100644 index 0000000000000..43a7996fe7df8 --- /dev/null +++ b/e2e_test/batch/functions/greatest_least.slt.part @@ -0,0 +1,73 @@ +statement ok +create 
table t(id int, v1 int2, v2 int4, v3 int8); + +statement ok +insert into t values (1, 1, 2, 3), (2, 2, NULL, 5), (3, NULL, NULL, 8), (4, NULL, NULL, NULL); + +statement ok +flush; + +statement error +select greatest(v1, '123'); + +statement error +select greatest(); + +statement error +select least(); + +query I +select greatest(1, 2, 3); +---- +3 + +query I +select greatest(2); +---- +2 + +query I +select least(1, 2, 3); +---- +1 + +query I +select least(2); +---- +2 + +query I +select greatest(v1, v2, v3) from t order by id; +---- +3 +5 +8 +NULL + +query I +select least(v1, v2, v3) from t order by id; +---- +1 +2 +8 +NULL + +query I +select greatest(7, v3) from t order by id; +---- +7 +7 +8 +7 + +query I +select least(NULL, v1, 2) from t order by id; +---- +1 +2 +2 +2 + + +statement ok +drop table t; \ No newline at end of file diff --git a/e2e_test/batch/issue_7324.slt b/e2e_test/batch/issue_7324.slt index c5ac2636cc44b..76f70ee0443ed 100644 --- a/e2e_test/batch/issue_7324.slt +++ b/e2e_test/batch/issue_7324.slt @@ -3,9 +3,6 @@ statement ok SET RW_IMPLICIT_FLUSH TO true; -statement ok -SET CREATE_COMPACTION_GROUP_FOR_MV TO true; - statement ok CREATE TABLE INT2_TBL(f1 int2); diff --git a/e2e_test/batch/subquery/implict_lateral_table_function.slt.part b/e2e_test/batch/subquery/implict_lateral_table_function.slt.part new file mode 100644 index 0000000000000..3b3dc0486f8a6 --- /dev/null +++ b/e2e_test/batch/subquery/implict_lateral_table_function.slt.part @@ -0,0 +1,24 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t(x int , arr int[]); + +statement ok +insert into t values (1, Array[11,111,1111]), (2, Array[22,222,2222]); + + +query IAI rowsort +select * from t cross join unnest(arr); +---- +1 {11,111,1111} 11 +1 {11,111,1111} 111 +1 {11,111,1111} 1111 +2 {22,222,2222} 22 +2 {22,222,2222} 222 +2 {22,222,2222} 2222 + +statement ok +drop table t; + + diff --git a/e2e_test/batch/subquery/subquery_with_hop_window.slt.part 
b/e2e_test/batch/subquery/subquery_with_hop_window.slt.part new file mode 100644 index 0000000000000..d4dba50fa37c0 --- /dev/null +++ b/e2e_test/batch/subquery/subquery_with_hop_window.slt.part @@ -0,0 +1,19 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t1 (k int primary key, ts timestamp); + +statement ok +insert into t1 values (1, '2021-01-01 10:15:00'); + +query IITTT rowsort +select * from (select 1 as col union select 2) u , lateral(select * from hop(t1, ts, interval '10' minute, interval '30' minute) where col = k); +---- +1 1 2021-01-01 10:15:00 2021-01-01 09:50:00 2021-01-01 10:20:00 +1 1 2021-01-01 10:15:00 2021-01-01 10:00:00 2021-01-01 10:30:00 +1 1 2021-01-01 10:15:00 2021-01-01 10:10:00 2021-01-01 10:40:00 + +statement ok +drop table t1; + diff --git a/e2e_test/batch/subquery/tab_completion.slt.part b/e2e_test/batch/subquery/tab_completion.slt.part new file mode 100644 index 0000000000000..60d87d6ff5e69 --- /dev/null +++ b/e2e_test/batch/subquery/tab_completion.slt.part @@ -0,0 +1,25 @@ +# queries from psql tab completion + +statement ok +create table ttttt(x int); + +statement ok +create table tttt(x int); + +# select * from tt +query I rowsort +SELECT pg_catalog.quote_ident(c.relname) FROM pg_catalog.pg_class c WHERE c.relkind IN ('r', 'S', 'v', 'm', 'f', 'p') AND substring(pg_catalog.quote_ident(c.relname),1,2)='tt' AND pg_catalog.pg_table_is_visible(c.oid) AND c.relnamespace <> (SELECT oid FROM pg_catalog.pg_namespace WHERE nspname = 'pg_catalog') +UNION +SELECT pg_catalog.quote_ident(n.nspname) || '.' FROM pg_catalog.pg_namespace n WHERE substring(pg_catalog.quote_ident(n.nspname) || '.',1,2)='tt' AND (SELECT pg_catalog.count(*) FROM pg_catalog.pg_namespace WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,2) = substring('tt',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) > 1 +UNION +SELECT pg_catalog.quote_ident(n.nspname) || '.' 
|| pg_catalog.quote_ident(c.relname) FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n WHERE c.relnamespace = n.oid AND c.relkind IN ('r', 'S', 'v', 'm', 'f', 'p') AND substring(pg_catalog.quote_ident(n.nspname) || '.' || pg_catalog.quote_ident(c.relname),1,2)='tt' AND substring(pg_catalog.quote_ident(n.nspname) || '.',1,2) = substring('tt',1,pg_catalog.length(pg_catalog.quote_ident(n.nspname))+1) AND (SELECT pg_catalog.count(*) FROM pg_catalog.pg_namespace WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,2) = substring('tt',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) = 1 +LIMIT 1000 +---- +tttt +ttttt + +statement ok +drop table tttt; + +statement ok +drop table ttttt; diff --git a/e2e_test/batch/tpch.slt b/e2e_test/batch/tpch.slt index b952fa798e96f..e895e30826104 100644 --- a/e2e_test/batch/tpch.slt +++ b/e2e_test/batch/tpch.slt @@ -44,9 +44,6 @@ include ../tpch/drop_tables.slt.part statement ok SET RW_IMPLICIT_FLUSH TO true; -statement ok -SET CREATE_COMPACTION_GROUP_FOR_MV TO true; - statement ok SET QUERY_MODE TO distributed; diff --git a/e2e_test/batch/transaction/now.slt b/e2e_test/batch/transaction/now.slt index d1be437ba371c..4f8d317f04261 100644 --- a/e2e_test/batch/transaction/now.slt +++ b/e2e_test/batch/transaction/now.slt @@ -1,5 +1,3 @@ -# Disabled, see https://github.com/risingwavelabs/risingwave/issues/10887 - statement ok create table t (ts timestamp); diff --git a/e2e_test/ddl/alter_rename_relation.slt b/e2e_test/ddl/alter_rename_relation.slt index fe4f16fec12bd..df085a273b0a9 100644 --- a/e2e_test/ddl/alter_rename_relation.slt +++ b/e2e_test/ddl/alter_rename_relation.slt @@ -128,6 +128,11 @@ public.mv_on_v1 CREATE MATERIALIZED VIEW mv_on_v1 AS SELECT * FROM v5 AS v1 statement ok ALTER INDEX idx RENAME TO idx1; +query TT +SHOW CREATE INDEX idx1; +---- +public.idx1 CREATE INDEX idx1 ON t2(v1) + statement ok INSERT INTO t2 VALUES(1,(1,(1,2))); diff --git a/e2e_test/ddl/search_path.slt b/e2e_test/ddl/search_path.slt index 
bbb927bdd8976..06db7f3f45c90 100644 --- a/e2e_test/ddl/search_path.slt +++ b/e2e_test/ddl/search_path.slt @@ -101,5 +101,44 @@ drop schema search_path_test1; statement ok drop schema search_path_test2; +# Schema for functions https://github.com/risingwavelabs/risingwave/issues/12422 + +query TI +select * from information_schema._pg_expandarray(Array['a','b','c']) +---- +a 1 +b 2 +c 3 + +# FIXME: This should not be available since information_schema is not in the search path +query TI +select * from _pg_expandarray(Array['a','b','c']) +---- +a 1 +b 2 +c 3 + + +statement ok +set search_path to information_schema; + +query TI +select * from _pg_expandarray(Array['a','b','c']) +---- +a 1 +b 2 +c 3 + +# built-in functions (pg_catalog) are always available +query I +select abs(1) +---- +1 + +query I +select pg_catalog.abs(1) +---- +1 + statement ok set search_path to "$user", public; diff --git a/e2e_test/ddl/show.slt b/e2e_test/ddl/show.slt index 787aacddb0330..5ae7575668645 100644 --- a/e2e_test/ddl/show.slt +++ b/e2e_test/ddl/show.slt @@ -7,20 +7,23 @@ create materialized view mv3 as select sum(v1) as sum_v1 from t3; statement ok create view v3 as select sum(v2) as sum_v2 from t3; -query TT +query TTT describe t3; ---- -v1 integer -v2 integer -v3 integer -primary key _row_id +v1 integer false +v2 integer false +v3 integer false +_row_id serial true +primary key _row_id NULL +distribution key _row_id NULL -query TT +query TTT show columns from t3; ---- -v1 integer -v2 integer -v3 integer +v1 integer false +v2 integer false +v3 integer false +_row_id serial true statement ok create index idx1 on t3 (v1,v2); @@ -30,14 +33,16 @@ show indexes from t3; ---- idx1 t3 v1 ASC, v2 ASC v3 v1 -query TT +query TTT describe t3; ---- -v1 integer -v2 integer -v3 integer -primary key _row_id -idx1 index(v1 ASC, v2 ASC) include(v3) distributed by(v1) +v1 integer false +v2 integer false +v3 integer false +_row_id serial true +primary key _row_id NULL +distribution key _row_id NULL +idx1 
index(v1 ASC, v2 ASC) include(v3) distributed by(v1) NULL query TT show create index idx1; diff --git a/e2e_test/ddl/table/generated_columns.slt.part b/e2e_test/ddl/table/generated_columns.slt.part index 08f07eb88f670..feea107fe9ac1 100644 --- a/e2e_test/ddl/table/generated_columns.slt.part +++ b/e2e_test/ddl/table/generated_columns.slt.part @@ -151,7 +151,7 @@ CREATE TABLE t (v INT, t timestamptz as now()) WITH ( ) FORMAT PLAIN ENCODE JSON; # create a table with impure generated column as pk. -statement error QueryError: Bind error: Generated columns should not be part of the primary key. Here column "v2" is defined as part of the primary key. +statement error QueryError: Bind error: Generated columns with impure expressions should not be part of the primary key. Here column "v2" is defined as part of the primary key. CREATE TABLE t ( v1 INT, v2 timestamptz AS proctime(), diff --git a/e2e_test/extended_mode/basic.slt b/e2e_test/extended_mode/basic.slt index 51513a444ec79..7869494979e47 100644 --- a/e2e_test/extended_mode/basic.slt +++ b/e2e_test/extended_mode/basic.slt @@ -39,20 +39,23 @@ values(round(42.4382)); statement ok create table t3 (v1 int, v2 int, v3 int); -query TT +query TTT describe t3; ---- -v1 integer -v2 integer -v3 integer -primary key _row_id - -query TT +v1 integer false +v2 integer false +v3 integer false +_row_id serial true +primary key _row_id NULL +distribution key _row_id NULL + +query TTT show columns from t3; ---- -v1 integer -v2 integer -v3 integer +v1 integer false +v2 integer false +v3 integer false +_row_id serial true statement ok drop table t3; diff --git a/e2e_test/iceberg/config.ini b/e2e_test/iceberg/config.ini index 6fa1ffbdc6832..bd95eddc5b80e 100644 --- a/e2e_test/iceberg/config.ini +++ b/e2e_test/iceberg/config.ini @@ -1,6 +1,3 @@ -[default] -result = data.csv - [spark] url=sc://localhost:15002 diff --git a/e2e_test/iceberg/data.csv b/e2e_test/iceberg/data.csv deleted file mode 100644 index 77ad8f16dbc9d..0000000000000 --- 
a/e2e_test/iceberg/data.csv +++ /dev/null @@ -1,5 +0,0 @@ -1,1,1000,1.1,1.11,1-1,true,2022-03-11,2022-03-11 01:00:00+00:00,2022-03-11 01:00:00 -2,2,2000,2.2,2.22,2-2,false,2022-03-12,2022-03-12 02:00:00+00:00,2022-03-12 02:00:00 -3,3,3000,3.3,3.33,3-3,true,2022-03-13,2022-03-13 03:00:00+00:00,2022-03-13 03:00:00 -4,4,4000,4.4,4.44,4-4,false,2022-03-14,2022-03-14 04:00:00+00:00,2022-03-14 04:00:00 -5,5,5000,5.5,5.55,5-5,true,2022-03-15,2022-03-15 05:00:00+00:00,2022-03-15 05:00:00 diff --git a/e2e_test/iceberg/main.py b/e2e_test/iceberg/main.py index 304962ef08ccc..3f3120227e6e7 100644 --- a/e2e_test/iceberg/main.py +++ b/e2e_test/iceberg/main.py @@ -1,9 +1,11 @@ from pyspark.sql import SparkSession +import argparse import configparser import subprocess import csv import unittest import time +import tomli as toml from datetime import date from datetime import datetime from datetime import timezone @@ -23,25 +25,6 @@ def strtots(v): g_spark = None -init_table_sqls = [ - "CREATE SCHEMA IF NOT EXISTS demo_db", - "DROP TABLE IF EXISTS demo_db.demo_table", - """ - CREATE TABLE demo_db.demo_table ( - id long, - v_int int, - v_long long, - v_float float, - v_double double, - v_varchar string, - v_bool boolean, - v_date date, - v_timestamp timestamp, - v_ts_ntz timestamp_ntz - ) TBLPROPERTIES ('format-version'='2'); - """, -] - def get_spark(args): spark_config = args['spark'] @@ -52,54 +35,83 @@ def get_spark(args): return g_spark -def init_iceberg_table(args): +def init_iceberg_table(args,init_sqls): spark = get_spark(args) - for sql in init_table_sqls: + for sql in init_sqls: print(f"Executing sql: {sql}") spark.sql(sql) -def init_risingwave_mv(args): +def execute_slt(args,slt): + if slt is None or slt == "": + return rw_config = args['risingwave'] - cmd = f"sqllogictest -p {rw_config['port']} -d {rw_config['db']} iceberg_sink_v2.slt" + cmd = f"sqllogictest -p {rw_config['port']} -d {rw_config['db']} {slt}" print(f"Command line is [{cmd}]") subprocess.run(cmd, 
shell=True, check=True) - time.sleep(60) + time.sleep(30) -def verify_result(args): - sql = "SELECT * FROM demo_db.demo_table ORDER BY id ASC" +def verify_result(args,verify_sql,verify_schema,verify_data): tc = unittest.TestCase() - print(f"Executing sql: {sql}") + print(f"Executing sql: {verify_sql}") spark = get_spark(args) - df = spark.sql(sql).collect() + df = spark.sql(verify_sql).collect() for row in df: print(row) - - with open(args['default']['result'], newline='') as csv_file: - csv_result = list(csv.reader(csv_file)) - for (row1, row2) in zip(df, csv_result): - print(f"Row1: {row1}, row 2: {row2}") - tc.assertEqual(row1[0], int(row2[0])) - tc.assertEqual(row1[1], int(row2[1])) - tc.assertEqual(row1[2], int(row2[2])) - tc.assertEqual(round(row1[3], 5), round(float(row2[3]), 5)) - tc.assertEqual(round(row1[4], 5), round(float(row2[4]), 5)) - tc.assertEqual(row1[5], row2[5]) - tc.assertEqual(row1[6], strtobool(row2[6])) - tc.assertEqual(row1[7], strtodate(row2[7])) - tc.assertEqual(row1[8].astimezone(timezone.utc).replace(tzinfo=None), strtots(row2[8])) - tc.assertEqual(row1[9], datetime.fromisoformat(row2[9])) - - tc.assertEqual(len(df), len(csv_result)) - + rows = verify_data.splitlines() + tc.assertEqual(len(df), len(rows)) + for (row1, row2) in zip(df, rows): + print(f"Row1: {row1}, Row 2: {row2}") + row2 = row2.split(',') + for idx, ty in enumerate(verify_schema): + if ty == "int" or ty == "long": + tc.assertEqual(row1[idx], int(row2[idx])) + elif ty == "float" or ty == "double": + tc.assertEqual(round(row1[idx], 5), round(float(row2[idx]), 5)) + elif ty == "boolean": + tc.assertEqual(row1[idx], strtobool(row2[idx])) + elif ty == "date": + tc.assertEqual(row1[idx], strtodate(row2[idx])) + elif ty == "timestamp": + tc.assertEqual(row1[idx].astimezone(timezone.utc).replace(tzinfo=None), strtots(row2[idx])) + elif ty == "timestamp_ntz": + tc.assertEqual(row1[idx], datetime.fromisoformat(row2[idx])) + elif ty == "string": + tc.assertEqual(row1[idx], 
row2[idx]) + else: + tc.fail(f"Unsupported type {ty}") + +def drop_table(args,drop_sqls): + spark = get_spark(args) + for sql in drop_sqls: + print(f"Executing sql: {sql}") + spark.sql(sql) if __name__ == "__main__": - config = configparser.ConfigParser() - config.read("config.ini") - print({section: dict(config[section]) for section in config.sections()}) - init_iceberg_table(config) - init_risingwave_mv(config) - verify_result(config) + parser = argparse.ArgumentParser(description="Test script for iceberg") + parser.add_argument("-t", dest="test_case", type=str, help="Test case file") + with open(parser.parse_args().test_case,"rb") as test_case: + test_case = toml.load(test_case) + # Extract content from testcase + init_sqls = test_case['init_sqls'] + print(f"init_sqls:{init_sqls}") + slt = test_case['slt'] + print(f"slt:{slt}") + verify_schema = test_case['verify_schema'] + print(f"verify_schema:{verify_schema}") + verify_sql = test_case['verify_sql'] + print(f"verify_sql:{verify_sql}") + verify_data = test_case['verify_data'] + drop_sqls = test_case['drop_sqls'] + + config = configparser.ConfigParser() + config.read("config.ini") + print({section: dict(config[section]) for section in config.sections()}) + + init_iceberg_table(config,init_sqls) + execute_slt(config,slt) + verify_result(config,verify_sql,verify_schema,verify_data) + drop_table(config,drop_sqls) diff --git a/e2e_test/iceberg/pyproject.toml b/e2e_test/iceberg/pyproject.toml index 7b19ed7b044f5..d13be72277592 100644 --- a/e2e_test/iceberg/pyproject.toml +++ b/e2e_test/iceberg/pyproject.toml @@ -7,7 +7,7 @@ authors = ["risingwavelabs"] [tool.poetry.dependencies] python = "^3.10" pyspark = { version = "3.4.1", extras = ["sql", "connect"] } - +tomli = "2.0" [build-system] diff --git a/e2e_test/iceberg/test_case/cdc/load.slt b/e2e_test/iceberg/test_case/cdc/load.slt new file mode 100644 index 0000000000000..caefd1326bbda --- /dev/null +++ b/e2e_test/iceberg/test_case/cdc/load.slt @@ -0,0 +1,46 @@ +# CDC 
source basic test + +# enable cdc backfill in ci +statement ok +set cdc_backfill='true'; + +statement ok +create table products ( id INT, + name STRING, + description STRING, + PRIMARY KEY (id) +) with ( + connector = 'mysql-cdc', + hostname = 'mysql', + port = '3306', + username = 'root', + password = '123456', + database.name = 'my@db', + table.name = 'products', + server.id = '5085' +); + + +statement ok +CREATE SINK s1 AS select * from products WITH ( + connector = 'iceberg', + type = 'upsert', + force_append_only = 'false', + database.name = 'demo', + table.name = 'demo_db.demo_table', + catalog.type = 'storage', + warehouse.path = 's3://icebergdata/demo', + s3.endpoint = 'http://127.0.0.1:9301', + s3.region = 'us-east-1', + s3.access.key = 'hummockadmin', + s3.secret.key = 'hummockadmin', + primary_key = 'id' +); + +query I +select count(*) from products; +---- +8 + +statement ok +flush; diff --git a/e2e_test/iceberg/test_case/cdc/mysql_cdc.sql b/e2e_test/iceberg/test_case/cdc/mysql_cdc.sql new file mode 100644 index 0000000000000..b7b6f13af83cf --- /dev/null +++ b/e2e_test/iceberg/test_case/cdc/mysql_cdc.sql @@ -0,0 +1,21 @@ +DROP DATABASE IF EXISTS `my@db`; +CREATE DATABASE `my@db`; + +USE `my@db`; + +CREATE TABLE products ( + id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) NOT NULL, + description VARCHAR(512) +); + +ALTER TABLE products AUTO_INCREMENT = 101; + +INSERT INTO products VALUES (default,"101","101"), +(default,"102","102"), +(default,"103","103"), +(default,"104","104"), +(default,"105","105"), +(default,"106","106"), +(default,"107","107"), +(default,"108","108") diff --git a/e2e_test/iceberg/test_case/cdc/mysql_cdc_insert.sql b/e2e_test/iceberg/test_case/cdc/mysql_cdc_insert.sql new file mode 100644 index 0000000000000..641d6220ea8dc --- /dev/null +++ b/e2e_test/iceberg/test_case/cdc/mysql_cdc_insert.sql @@ -0,0 +1,7 @@ +USE `my@db`; + +INSERT INTO products VALUES (default,"109","109"), +(default,"110","110"), 
+(default,"111","111"), +(default,"112","112"), +(default,"113","113"); diff --git a/e2e_test/iceberg/test_case/cdc/no_partition_cdc.toml b/e2e_test/iceberg/test_case/cdc/no_partition_cdc.toml new file mode 100644 index 0000000000000..5ab9647b12eb0 --- /dev/null +++ b/e2e_test/iceberg/test_case/cdc/no_partition_cdc.toml @@ -0,0 +1,25 @@ +init_sqls = [] + +slt = '' + +verify_schema = ['int','string','string'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id ASC' + +verify_data = """ +101,101,101 +102,102,102 +103,103,103 +104,104,104 +105,105,105 +106,106,106 +107,107,107 +108,108,108 +109,109,109 +110,110,110 +111,111,111 +112,112,112 +113,113,113 +""" + +drop_sqls = [] diff --git a/e2e_test/iceberg/test_case/cdc/no_partition_cdc_init.toml b/e2e_test/iceberg/test_case/cdc/no_partition_cdc_init.toml new file mode 100644 index 0000000000000..17e5f7497aae5 --- /dev/null +++ b/e2e_test/iceberg/test_case/cdc/no_partition_cdc_init.toml @@ -0,0 +1,31 @@ +init_sqls = [ + 'CREATE SCHEMA IF NOT EXISTS demo_db', + 'DROP TABLE IF EXISTS demo_db.demo_table', + ''' + CREATE TABLE demo_db.demo_table ( + id int, + name string, + description string + ) USING iceberg + TBLPROPERTIES ('format-version'='2'); + ''' +] + +slt = 'test_case/cdc/load.slt' + +verify_schema = ['int','string','string'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id ASC' + +verify_data = """ +101,101,101 +102,102,102 +103,103,103 +104,104,104 +105,105,105 +106,106,106 +107,107,107 +108,108,108 +""" + +drop_sqls = [] diff --git a/e2e_test/iceberg/iceberg_sink_v2.slt b/e2e_test/iceberg/test_case/iceberg_sink_append_only.slt similarity index 89% rename from e2e_test/iceberg/iceberg_sink_v2.slt rename to e2e_test/iceberg/test_case/iceberg_sink_append_only.slt index 59af62b5a1b46..5f847eaa30a7e 100644 --- a/e2e_test/iceberg/iceberg_sink_v2.slt +++ b/e2e_test/iceberg/test_case/iceberg_sink_append_only.slt @@ -23,12 +23,14 @@ CREATE SINK s6 AS select * from mv6 WITH ( connector = 
'iceberg', type = 'append-only', force_append_only = 'true', + database.name = 'demo', + table.name = 'demo_db.demo_table', + catalog.type = 'storage', warehouse.path = 's3://icebergdata/demo', s3.endpoint = 'http://127.0.0.1:9301', + s3.region = 'us-east-1', s3.access.key = 'hummockadmin', - s3.secret.key = 'hummockadmin', - database.name='demo_db', - table.name='demo_table' + s3.secret.key = 'hummockadmin' ); statement ok diff --git a/e2e_test/iceberg/test_case/iceberg_sink_upsert.slt b/e2e_test/iceberg/test_case/iceberg_sink_upsert.slt new file mode 100644 index 0000000000000..646a39cc08e28 --- /dev/null +++ b/e2e_test/iceberg/test_case/iceberg_sink_upsert.slt @@ -0,0 +1,45 @@ +statement ok +set streaming_parallelism=4; + +statement ok +CREATE TABLE t6 (id int, v1 int primary key, v2 bigint, v3 varchar); + +statement ok +CREATE MATERIALIZED VIEW mv6 AS SELECT * FROM t6; + +statement ok +CREATE SINK s6 AS select mv6.id as id, mv6.v1 as v1, mv6.v2 as v2, mv6.v3 as v3 from mv6 WITH ( + connector = 'iceberg', + type = 'upsert', + force_append_only = 'false', + database.name = 'demo', + table.name = 'demo_db.demo_table', + catalog.type = 'storage', + warehouse.path = 's3://icebergdata/demo', + s3.endpoint = 'http://127.0.0.1:9301', + s3.region = 'us-east-1', + s3.access.key = 'hummockadmin', + s3.secret.key = 'hummockadmin', + primary_key = 'v1' +); + +statement ok +INSERT INTO t6 VALUES (1, 1, 2, '1-2'), (1, 2, 2, '2-2'), (1, 3, 2, '3-2'), (1, 5, 2, '5-2'), (1, 8, 2, '8-2'), (1, 13, 2, '13-2'), (1, 21, 2, '21-2'); + +statement ok +FLUSH; + +statement ok +INSERT INTO t6 VALUES (1, 1, 50, '1-50'); + +statement ok +FLUSH; + +statement ok +DROP SINK s6; + +statement ok +DROP MATERIALIZED VIEW mv6; + +statement ok +DROP TABLE t6; diff --git a/e2e_test/iceberg/test_case/no_partition_append_only.toml b/e2e_test/iceberg/test_case/no_partition_append_only.toml new file mode 100644 index 0000000000000..211407644abec --- /dev/null +++ 
b/e2e_test/iceberg/test_case/no_partition_append_only.toml @@ -0,0 +1,38 @@ +init_sqls = [ + 'CREATE SCHEMA IF NOT EXISTS demo_db', + 'DROP TABLE IF EXISTS demo_db.demo_table', + ''' + CREATE TABLE demo_db.demo_table ( + id long, + v_int int, + v_long long, + v_float float, + v_double double, + v_varchar string, + v_bool boolean, + v_date date, + v_timestamp timestamp, + v_ts_ntz timestamp_ntz + ) TBLPROPERTIES ('format-version'='2'); + ''' +] + +slt = 'test_case/iceberg_sink_append_only.slt' + +verify_schema = ['long', 'int', 'long', 'float', 'double', 'string', 'boolean', 'date', 'timestamp', 'timestamp_ntz'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id ASC' + + +verify_data = """ +1,1,1000,1.1,1.11,1-1,true,2022-03-11,2022-03-11 01:00:00+00:00,2022-03-11 01:00:00 +2,2,2000,2.2,2.22,2-2,false,2022-03-12,2022-03-12 02:00:00+00:00,2022-03-12 02:00:00 +3,3,3000,3.3,3.33,3-3,true,2022-03-13,2022-03-13 03:00:00+00:00,2022-03-13 03:00:00 +4,4,4000,4.4,4.44,4-4,false,2022-03-14,2022-03-14 04:00:00+00:00,2022-03-14 04:00:00 +5,5,5000,5.5,5.55,5-5,true,2022-03-15,2022-03-15 05:00:00+00:00,2022-03-15 05:00:00 +""" + +drop_sqls = [ + 'DROP TABLE IF EXISTS demo_db.demo_table', + 'DROP SCHEMA IF EXISTS demo_db' +] diff --git a/e2e_test/iceberg/test_case/no_partition_upsert.toml b/e2e_test/iceberg/test_case/no_partition_upsert.toml new file mode 100644 index 0000000000000..0e0215d37465d --- /dev/null +++ b/e2e_test/iceberg/test_case/no_partition_upsert.toml @@ -0,0 +1,34 @@ +init_sqls = [ + 'CREATE SCHEMA IF NOT EXISTS demo_db', + 'DROP TABLE IF EXISTS demo_db.demo_table', + ''' + CREATE TABLE demo_db.demo_table ( + id int, + v1 int, + v2 long, + v3 string + ) USING iceberg + TBLPROPERTIES ('format-version'='2'); + ''' +] + +slt = 'test_case/iceberg_sink_upsert.slt' + +verify_schema = ['int','int','long','string'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id, v1 ASC' + +verify_data = """ +1,1,50,1-50 +1,2,2,2-2 +1,3,2,3-2 +1,5,2,5-2 +1,8,2,8-2 
+1,13,2,13-2 +1,21,2,21-2 +""" + +drop_sqls = [ + 'DROP TABLE IF EXISTS demo_db.demo_table', + 'DROP SCHEMA IF EXISTS demo_db' +] diff --git a/e2e_test/iceberg/test_case/partition_append_only.toml b/e2e_test/iceberg/test_case/partition_append_only.toml new file mode 100644 index 0000000000000..4721ef11c5ba6 --- /dev/null +++ b/e2e_test/iceberg/test_case/partition_append_only.toml @@ -0,0 +1,40 @@ +init_sqls = [ + 'CREATE SCHEMA IF NOT EXISTS demo_db', + 'DROP TABLE IF EXISTS demo_db.demo_table', + ''' + CREATE TABLE demo_db.demo_table ( + id long, + v_int int, + v_long long, + v_float float, + v_double double, + v_varchar string, + v_bool boolean, + v_date date, + v_timestamp timestamp, + v_ts_ntz timestamp_ntz + ) + PARTITIONED BY (v_int,v_long,v_float,v_double,v_varchar,v_bool,v_date,v_timestamp,v_ts_ntz) + TBLPROPERTIES ('format-version'='2'); + ''' +] + +slt = 'test_case/iceberg_sink_append_only.slt' + +verify_schema = ['long', 'int', 'long', 'float', 'double', 'string', 'boolean', 'date', 'timestamp', 'timestamp_ntz'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id ASC' + + +verify_data = """ +1,1,1000,1.1,1.11,1-1,true,2022-03-11,2022-03-11 01:00:00+00:00,2022-03-11 01:00:00 +2,2,2000,2.2,2.22,2-2,false,2022-03-12,2022-03-12 02:00:00+00:00,2022-03-12 02:00:00 +3,3,3000,3.3,3.33,3-3,true,2022-03-13,2022-03-13 03:00:00+00:00,2022-03-13 03:00:00 +4,4,4000,4.4,4.44,4-4,false,2022-03-14,2022-03-14 04:00:00+00:00,2022-03-14 04:00:00 +5,5,5000,5.5,5.55,5-5,true,2022-03-15,2022-03-15 05:00:00+00:00,2022-03-15 05:00:00 +""" + +drop_sqls = [ + 'DROP TABLE IF EXISTS demo_db.demo_table', + 'DROP SCHEMA IF EXISTS demo_db' +] diff --git a/e2e_test/iceberg/test_case/partition_upsert.toml b/e2e_test/iceberg/test_case/partition_upsert.toml new file mode 100644 index 0000000000000..d95178ed893fa --- /dev/null +++ b/e2e_test/iceberg/test_case/partition_upsert.toml @@ -0,0 +1,35 @@ +init_sqls = [ + 'CREATE SCHEMA IF NOT EXISTS demo_db', + 'DROP TABLE IF EXISTS 
demo_db.demo_table', + ''' + CREATE TABLE demo_db.demo_table ( + id int, + v1 int, + v2 long, + v3 string + ) USING iceberg + PARTITIONED BY (v1,v2) + TBLPROPERTIES ('format-version'='2'); + ''' +] + +slt = 'test_case/iceberg_sink_upsert.slt' + +verify_schema = ['int','int','long','string'] + +verify_sql = 'SELECT * FROM demo_db.demo_table ORDER BY id, v1 ASC' + +verify_data = """ +1,1,50,1-50 +1,2,2,2-2 +1,3,2,3-2 +1,5,2,5-2 +1,8,2,8-2 +1,13,2,13-2 +1,21,2,21-2 +""" + +drop_sqls = [ + 'DROP TABLE IF EXISTS demo_db.demo_table', + 'DROP SCHEMA IF EXISTS demo_db' +] diff --git a/e2e_test/over_window/generated/batch/create.slt.part b/e2e_test/over_window/generated/batch/create.slt.part index 8e489c3dde0bd..5f4b5e1152804 100644 --- a/e2e_test/over_window/generated/batch/create.slt.part +++ b/e2e_test/over_window/generated/batch/create.slt.part @@ -49,6 +49,16 @@ select , row_number() over (partition by p1 order by p2 desc, id) as out11 from t; +# over + agg +statement ok +create view v_e as +select + p1, p2 + , row_number() over (partition by p1 order by p2) as out12 + , sum(sum(v2)) over (partition by p1, avg(time) order by max(v1), p2) as out13 +from t +group by p1, p2; + statement ok create view v_a_b as select @@ -103,4 +113,4 @@ select , first_value(v1) over (partition by p1, p2 order by time, id rows 3 preceding) as out3 , lag(v1 + 2, 0 + 1) over (partition by p1 - 1 order by id) as out4 , min(v1 * 2) over (partition by p1, p2 order by time + 1, id rows between current row and unbounded following) as out5 -from t; \ No newline at end of file +from t; diff --git a/e2e_test/over_window/generated/batch/drop.slt.part b/e2e_test/over_window/generated/batch/drop.slt.part index 8eaca578e1f4e..435ffd46433e7 100644 --- a/e2e_test/over_window/generated/batch/drop.slt.part +++ b/e2e_test/over_window/generated/batch/drop.slt.part @@ -12,6 +12,9 @@ drop view v_c; statement ok drop view v_d; +statement ok +drop view v_e; + statement ok drop view v_a_b; diff --git 
a/e2e_test/over_window/generated/batch/mod.slt.part b/e2e_test/over_window/generated/batch/mod.slt.part index ff46877de7ddf..2c7778fd46aff 100644 --- a/e2e_test/over_window/generated/batch/mod.slt.part +++ b/e2e_test/over_window/generated/batch/mod.slt.part @@ -41,6 +41,13 @@ select * from v_d order by id; 100003 100 208 2 723 807 3 1 100004 103 200 2 702 808 1 1 +query iiii +select * from v_e order by p1; +---- +100 200 1 1611 +100 208 2 807 +103 200 1 808 + include ./cross_check.slt.part statement ok @@ -88,6 +95,14 @@ select * from v_d order by id; 100005 100 200 3 717 810 4 4 100006 105 204 5 703 828 1 1 +query iiii +select * from v_e order by p1, p2; +---- +100 200 1 2421 +100 208 2 3228 +103 200 1 808 +105 204 1 828 + include ./cross_check.slt.part statement ok @@ -139,6 +154,13 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 4 100006 105 204 5 703 828 1 1 +query iiiiiii +select * from v_e order by p1; +---- +100 200 1 3228 +103 200 1 808 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- @@ -182,6 +204,12 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 2 100006 105 204 5 703 828 1 1 +query iiii +select * from v_e order by p1; +---- +100 200 1 1615 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- diff --git a/e2e_test/over_window/generated/streaming/create.slt.part b/e2e_test/over_window/generated/streaming/create.slt.part index 23a496ff0f315..4334fb1cdd30e 100644 --- a/e2e_test/over_window/generated/streaming/create.slt.part +++ b/e2e_test/over_window/generated/streaming/create.slt.part @@ -49,6 +49,16 @@ select , row_number() over (partition by p1 order by p2 desc, id) as out11 from t; +# over + agg +statement ok +create materialized view v_e as +select + p1, p2 + , row_number() over (partition by p1 order by p2) as out12 + , sum(sum(v2)) over (partition by p1, avg(time) order by max(v1), p2) as out13 +from t +group by p1, p2; + statement ok create materialized view v_a_b as select @@ 
-103,4 +113,4 @@ select , first_value(v1) over (partition by p1, p2 order by time, id rows 3 preceding) as out3 , lag(v1 + 2, 0 + 1) over (partition by p1 - 1 order by id) as out4 , min(v1 * 2) over (partition by p1, p2 order by time + 1, id rows between current row and unbounded following) as out5 -from t; \ No newline at end of file +from t; diff --git a/e2e_test/over_window/generated/streaming/drop.slt.part b/e2e_test/over_window/generated/streaming/drop.slt.part index d469282f41247..e6c4fcfaad244 100644 --- a/e2e_test/over_window/generated/streaming/drop.slt.part +++ b/e2e_test/over_window/generated/streaming/drop.slt.part @@ -12,6 +12,9 @@ drop materialized view v_c; statement ok drop materialized view v_d; +statement ok +drop materialized view v_e; + statement ok drop materialized view v_a_b; diff --git a/e2e_test/over_window/generated/streaming/mod.slt.part b/e2e_test/over_window/generated/streaming/mod.slt.part index ff46877de7ddf..2c7778fd46aff 100644 --- a/e2e_test/over_window/generated/streaming/mod.slt.part +++ b/e2e_test/over_window/generated/streaming/mod.slt.part @@ -41,6 +41,13 @@ select * from v_d order by id; 100003 100 208 2 723 807 3 1 100004 103 200 2 702 808 1 1 +query iiii +select * from v_e order by p1; +---- +100 200 1 1611 +100 208 2 807 +103 200 1 808 + include ./cross_check.slt.part statement ok @@ -88,6 +95,14 @@ select * from v_d order by id; 100005 100 200 3 717 810 4 4 100006 105 204 5 703 828 1 1 +query iiii +select * from v_e order by p1, p2; +---- +100 200 1 2421 +100 208 2 3228 +103 200 1 808 +105 204 1 828 + include ./cross_check.slt.part statement ok @@ -139,6 +154,13 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 4 100006 105 204 5 703 828 1 1 +query iiiiiii +select * from v_e order by p1; +---- +100 200 1 3228 +103 200 1 808 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- @@ -182,6 +204,12 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 2 100006 105 204 5 703 828 1 1 +query 
iiii +select * from v_e order by p1; +---- +100 200 1 1615 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- diff --git a/e2e_test/over_window/templates/create.slt.part b/e2e_test/over_window/templates/create.slt.part index 0d16b52fcdc86..7ac749e459b02 100644 --- a/e2e_test/over_window/templates/create.slt.part +++ b/e2e_test/over_window/templates/create.slt.part @@ -47,6 +47,16 @@ select , row_number() over (partition by p1 order by p2 desc, id) as out11 from t; +# over + agg +statement ok +create $view_type v_e as +select + p1, p2 + , row_number() over (partition by p1 order by p2) as out12 + , sum(sum(v2)) over (partition by p1, avg(time) order by max(v1), p2) as out13 +from t +group by p1, p2; + statement ok create $view_type v_a_b as select @@ -101,4 +111,4 @@ select , first_value(v1) over (partition by p1, p2 order by time, id rows 3 preceding) as out3 , lag(v1 + 2, 0 + 1) over (partition by p1 - 1 order by id) as out4 , min(v1 * 2) over (partition by p1, p2 order by time + 1, id rows between current row and unbounded following) as out5 -from t; \ No newline at end of file +from t; diff --git a/e2e_test/over_window/templates/drop.slt.part b/e2e_test/over_window/templates/drop.slt.part index 926305ee42699..def8e92379878 100644 --- a/e2e_test/over_window/templates/drop.slt.part +++ b/e2e_test/over_window/templates/drop.slt.part @@ -10,6 +10,9 @@ drop $view_type v_c; statement ok drop $view_type v_d; +statement ok +drop $view_type v_e; + statement ok drop $view_type v_a_b; diff --git a/e2e_test/over_window/templates/mod.slt.part b/e2e_test/over_window/templates/mod.slt.part index 3e48a52358701..1b1b86a0d40d3 100644 --- a/e2e_test/over_window/templates/mod.slt.part +++ b/e2e_test/over_window/templates/mod.slt.part @@ -39,6 +39,13 @@ select * from v_d order by id; 100003 100 208 2 723 807 3 1 100004 103 200 2 702 808 1 1 +query iiii +select * from v_e order by p1; +---- +100 200 1 1611 +100 208 2 807 +103 200 1 808 + include 
./cross_check.slt.part statement ok @@ -86,6 +93,14 @@ select * from v_d order by id; 100005 100 200 3 717 810 4 4 100006 105 204 5 703 828 1 1 +query iiii +select * from v_e order by p1, p2; +---- +100 200 1 2421 +100 208 2 3228 +103 200 1 808 +105 204 1 828 + include ./cross_check.slt.part statement ok @@ -137,6 +152,13 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 4 100006 105 204 5 703 828 1 1 +query iiiiiii +select * from v_e order by p1; +---- +100 200 1 3228 +103 200 1 808 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- @@ -180,6 +202,12 @@ select * from v_d order by id; 100005 100 200 1 717 810 2 2 100006 105 204 5 703 828 1 1 +query iiii +select * from v_e order by p1; +---- +100 200 1 1615 +105 204 1 828 + query iiiiiiiiii select * from v_expr order by id; ---- diff --git a/e2e_test/s3/fs_source_v2.py b/e2e_test/s3/fs_source_v2.py new file mode 100644 index 0000000000000..a687c9be19c9d --- /dev/null +++ b/e2e_test/s3/fs_source_v2.py @@ -0,0 +1,155 @@ +import os +import sys +import csv +import json +import random +import psycopg2 + +from time import sleep +from io import StringIO +from minio import Minio +from functools import partial + +def gen_data(file_num, item_num_per_file): + assert item_num_per_file % 2 == 0, \ + f'item_num_per_file should be even to ensure sum(mark) == 0: {item_num_per_file}' + return [ + [{ + 'id': file_id * item_num_per_file + item_id, + 'name': f'{file_id}_{item_id}', + 'sex': item_id % 2, + 'mark': (-1) ** (item_id % 2), + } for item_id in range(item_num_per_file)] + for file_id in range(file_num) + ] + +def format_json(data): + return [ + '\n'.join([json.dumps(item) for item in file]) + for file in data + ] + +def format_csv(data, with_header): + csv_files = [] + + for file_data in data: + ostream = StringIO() + writer = csv.DictWriter(ostream, fieldnames=file_data[0].keys()) + if with_header: + writer.writeheader() + for item_data in file_data: + writer.writerow(item_data) + 
csv_files.append(ostream.getvalue()) + return csv_files + +def do_test(config, file_num, item_num_per_file, prefix, fmt): + conn = psycopg2.connect( + host="localhost", + port="4566", + user="root", + database="dev" + ) + + # Open a cursor to execute SQL statements + cur = conn.cursor() + + def _table(): + return f's3_test_{fmt}' + + def _encode(): + if fmt == 'json': + return 'JSON' + else: + return f"CSV (delimiter = ',', without_header = {str('without' in fmt).lower()})" + + # Execute a SELECT statement + cur.execute(f'''CREATE TABLE {_table()}( + id int, + name TEXT, + sex int, + mark int, + ) WITH ( + connector = 's3_v2', + match_pattern = '{prefix}*.{fmt}', + s3.region_name = '{config['S3_REGION']}', + s3.bucket_name = '{config['S3_BUCKET']}', + s3.credentials.access = '{config['S3_ACCESS_KEY']}', + s3.credentials.secret = '{config['S3_SECRET_KEY']}', + s3.endpoint_url = 'https://{config['S3_ENDPOINT']}' + ) FORMAT PLAIN ENCODE {_encode()};''') + + total_rows = file_num * item_num_per_file + MAX_RETRIES = 40 + for retry_no in range(MAX_RETRIES): + cur.execute(f'select count(*) from {_table()}') + result = cur.fetchone() + if result[0] == total_rows: + break + print(f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 30s") + sleep(30) + + stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_table()}' + print(f'Execute {stmt}') + cur.execute(stmt) + result = cur.fetchone() + + print('Got:', result) + + def _assert_eq(field, got, expect): + assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' 
+ + _assert_eq('count(*)', result[0], total_rows) + _assert_eq('sum(id)', result[1], (total_rows - 1) * total_rows / 2) + _assert_eq('sum(sex)', result[2], total_rows / 2) + _assert_eq('sum(mark)', result[3], 0) + + print('Test pass') + + cur.execute(f'drop table {_table()}') + cur.close() + conn.close() + + +if __name__ == "__main__": + FILE_NUM = 4001 + ITEM_NUM_PER_FILE = 2 + data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) + + fmt = sys.argv[1] + FORMATTER = { + 'json': format_json, + 'csv_with_header': partial(format_csv, with_header=True), + 'csv_without_header': partial(format_csv, with_header=False), + } + assert fmt in FORMATTER, f"Unsupported format: {fmt}" + formatted_files = FORMATTER[fmt](data) + + config = json.loads(os.environ["S3_SOURCE_TEST_CONF"]) + client = Minio( + config["S3_ENDPOINT"], + access_key=config["S3_ACCESS_KEY"], + secret_key=config["S3_SECRET_KEY"], + secure=True, + ) + run_id = str(random.randint(1000, 9999)) + _local = lambda idx: f'data_{idx}.{fmt}' + _s3 = lambda idx: f"{run_id}_data_{idx}.{fmt}" + + # put s3 files + for idx, file_str in enumerate(formatted_files): + with open(_local(idx), "w") as f: + f.write(file_str) + os.fsync(f.fileno()) + + client.fput_object( + config["S3_BUCKET"], + _s3(idx), + _local(idx) + ) + + # do test + do_test(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id, fmt) + + # clean up s3 files + for idx, _ in enumerate(formatted_files): + client.remove_object(config["S3_BUCKET"], _s3(idx)) diff --git a/e2e_test/sink/append_only_sink.slt b/e2e_test/sink/append_only_sink.slt index 5ace195ec48ce..405ca132ae0a9 100644 --- a/e2e_test/sink/append_only_sink.slt +++ b/e2e_test/sink/append_only_sink.slt @@ -22,7 +22,7 @@ create sink invalid_sink_type from t with (connector = 'blackhole', type = 'inva statement error `force_append_only` must be true or false create sink invalid_force_append_only from t with (connector = 'blackhole', force_append_only = 'invalid'); -statement error invalid connector type: invalid 
+statement error db error: ERROR: QueryError: Sink error: config error: unsupported sink type invalid create sink invalid_connector from t with (connector = 'invalid'); statement ok diff --git a/e2e_test/sink/elasticsearch/elasticsearch_sink.slt b/e2e_test/sink/elasticsearch/elasticsearch_sink.slt index 2728f33da04e5..cecf9a47cf94a 100644 --- a/e2e_test/sink/elasticsearch/elasticsearch_sink.slt +++ b/e2e_test/sink/elasticsearch/elasticsearch_sink.slt @@ -3,7 +3,7 @@ CREATE TABLE t7 (v1 int primary key, v2 bigint, v3 varchar); statement ok CREATE SINK s7 AS select t7.v1 as v1, t7.v2 as v2, t7.v3 as v3 from t7 WITH ( - connector = 'elasticsearch-7', + connector = 'elasticsearch', index = 'test', url = 'http://elasticsearch:9200', username = 'elastic', diff --git a/e2e_test/sink/kafka/create_sink.slt b/e2e_test/sink/kafka/create_sink.slt index b1b48ce93bead..a1f296774f526 100644 --- a/e2e_test/sink/kafka/create_sink.slt +++ b/e2e_test/sink/kafka/create_sink.slt @@ -10,7 +10,7 @@ create table t_kafka ( v_timestamp timestamp ); -statement error failed to fetch metadata from kafka +statement error cannot connect to kafka broker create sink sink_non_exist_broker from t_kafka with ( connector = 'kafka', properties.bootstrap.server = 'make no sense', @@ -19,15 +19,6 @@ create sink sink_non_exist_broker from t_kafka with ( type = 'append-only', ); -statement error topic invalid_topic not found -create sink sink_non_exist_topic from t_kafka with ( - connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', - topic = 'invalid_topic', - force_append_only = 'true', - type = 'append-only', -); - # Test create sink with connection # Create a mock connection statement ok @@ -40,7 +31,7 @@ create connection mock with ( statement error create sink si_kafka_append_only_conn from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-append-only', type = 
'append-only', force_append_only = 'true', @@ -51,7 +42,7 @@ create sink si_kafka_append_only_conn from t_kafka with ( statement ok create sink si_kafka_append_only_conn from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-append-only', type = 'append-only', force_append_only = 'true', @@ -75,7 +66,7 @@ drop connection mock; statement error sink cannot be append-only create sink si_kafka_append_only from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-append-only', type = 'append-only', ); @@ -83,7 +74,7 @@ create sink si_kafka_append_only from t_kafka with ( statement ok create sink si_kafka_append_only from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-append-only', type = 'append-only', force_append_only = 'true' @@ -92,7 +83,7 @@ create sink si_kafka_append_only from t_kafka with ( statement error primary key not defined create sink si_kafka_upsert from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-upsert', type = 'upsert', ); @@ -100,25 +91,35 @@ create sink si_kafka_upsert from t_kafka with ( statement ok create sink si_kafka_upsert from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-upsert', type = 'upsert', primary_key = 'id', ); +statement ok +create sink si_kafka_upsert_schema from t_kafka with ( + connector = 'kafka', + properties.bootstrap.server = 'message_queue:29092', + topic = 'test-rw-sink-upsert-schema', + primary_key = 'id', +) format upsert encode json ( + schemas.enable 
= true +); + statement ok create sink si_kafka_debezium from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-debezium', type = 'debezium', primary_key = 'id', ); -statement error primary key not defined for debezium kafka sink +statement error primary key not defined create sink debezium_without_pk from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-debezium', type = 'debezium', ); @@ -126,7 +127,7 @@ create sink debezium_without_pk from t_kafka with ( statement ok create sink multiple_pk from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-debezium', type = 'debezium', primary_key = 'id,v_varchar' @@ -138,7 +139,7 @@ drop sink multiple_pk; statement error Sink primary key column not found: invalid. create sink invalid_pk_column from t_kafka with ( connector = 'kafka', - properties.bootstrap.server = '127.0.0.1:29092', + properties.bootstrap.server = 'message_queue:29092', topic = 'test-rw-sink-debezium', type = 'debezium', primary_key = 'id,invalid' diff --git a/e2e_test/sink/kafka/debezium.py b/e2e_test/sink/kafka/debezium.py index 4d8e375e7a3bf..4ec5e31281d88 100644 --- a/e2e_test/sink/kafka/debezium.py +++ b/e2e_test/sink/kafka/debezium.py @@ -15,11 +15,11 @@ if kv[1] == "null": value = kv[1] else: - value = json.loads(kv[1]) value = json.loads(kv[1]) # The `ts_ms` field may vary, so we delete it from the json object # and assert the remaining fields equal. del value["payload"]["ts_ms"] + del value["payload"]["source"]["ts_ms"] expected_data.append(key) expected_data.append(value) @@ -37,6 +37,8 @@ # Assert `ts_ms` is an integer here. 
assert isinstance(value["payload"]["ts_ms"], int) del value["payload"]["ts_ms"] + assert isinstance(value["payload"]["source"]["ts_ms"], int) + del value["payload"]["source"]["ts_ms"] test_data.append(key) test_data.append(value) diff --git a/e2e_test/sink/kafka/debezium1.result b/e2e_test/sink/kafka/debezium1.result index 6c728e55346d9..bc92096cd2c62 100644 --- a/e2e_test/sink/kafka/debezium1.result +++ b/e2e_test/sink/kafka/debezium1.result @@ -1,10 +1,10 @@ -{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","opti
onal":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} -{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"typ
e":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} -{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_ka
fka.Envelope","optional":false,"type":"struct"}} -{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/kafka/debezium2.result 
b/e2e_test/sink/kafka/debezium2.result index 3012648769070..c1f1524c9a825 100644 --- a/e2e_test/sink/kafka/debezium2.result +++ b/e2e_test/sink/kafka/debezium2.result @@ -1,11 +1,11 @@ -{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kaf
ka.Envelope","optional":false,"type":"struct"}} -{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"before":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"op":"u","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549594077},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"typ
e":"struct"}} -{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"before":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"op":"u","source":{"db":"dev","table":"t_kafka","ts_ms":1696838696640},"ts_ms":1696838696640},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelop
e","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/kafka/debezium3.result 
b/e2e_test/sink/kafka/debezium3.result index de658fe7157f2..2445a019d7d44 100644 --- a/e2e_test/sink/kafka/debezium3.result +++ b/e2e_test/sink/kafka/debezium3.result @@ -1,13 +1,13 @@ -{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kaf
ka.Envelope","optional":false,"type":"struct"}} +{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} null -{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":null,"before":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"op":"d","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549624995},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"before":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"op":"u","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549594077},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
-{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka"},"ts_ms":1693549473650},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":null,"before":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"op":"d","source":{"db":"dev","table":"t_kafka","ts_ms":1696838869564},"ts_ms":1696838869564},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"before":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"op":"u","source":{"db":"dev","table":"t_kafka","ts_ms":1696838696640},"ts_ms":1696838696640},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelop
e","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} 
+{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"RisingWave.dev.t_kafka.Key","optional":false,"type":"struct"}} {"payload":{"after":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"before":null,"op":"c","source":{"db":"dev","table":"t_kafka","ts_ms":1696838120862},"ts_ms":1696838120862},"schema":{"fields":[{"field":"before","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"after","fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Key","optional":true,"type":"struct"},{"field":"source","fields":[{"field":"db","optional":false,"type":"string"},{"field":"table","optional":true,"type":"string"},{"field":"table","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Source","optional":false,"type":"struct"},{"field":"op","optional":false,"type":"string"},{"field":"ts_ms","optional":false,"type":"int64"}],"name":"RisingWave.dev.t_kafka.Envelope","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/kafka/drop_sink.slt 
b/e2e_test/sink/kafka/drop_sink.slt index 4b9b10eceb3d7..9479671b9f0f0 100644 --- a/e2e_test/sink/kafka/drop_sink.slt +++ b/e2e_test/sink/kafka/drop_sink.slt @@ -7,5 +7,8 @@ drop sink si_kafka_upsert; statement ok drop sink si_kafka_debezium; +statement ok +drop sink si_kafka_upsert_schema; + statement ok drop table t_kafka; diff --git a/e2e_test/sink/kafka/protobuf.slt b/e2e_test/sink/kafka/protobuf.slt new file mode 100644 index 0000000000000..87ab884eddbde --- /dev/null +++ b/e2e_test/sink/kafka/protobuf.slt @@ -0,0 +1,97 @@ +statement ok +create table from_kafka with ( + connector = 'kafka', + topic = 'test-rw-sink-append-only-protobuf', + properties.bootstrap.server = 'message_queue:29092') +format plain encode protobuf ( + schema.location = 'file:///risingwave/proto-recursive', + message = 'recursive.AllTypes'); + +statement ok +create table into_kafka ( + bool_field bool, + string_field varchar, + bytes_field bytea, + float_field real, + double_field double precision, + int32_field int, + int64_field bigint, + sint32_field int, + sint64_field bigint, + sfixed32_field int, + sfixed64_field bigint, + nested_message_field struct, + repeated_int_field int[], + timestamp_field timestamptz, + oneof_int32 int); + +statement ok +insert into into_kafka values + (true, 'Rising', 'a0', 3.5, 4.25, 22, 23, 24, null, 26, 27, row(1, ''), array[4, 0, 4], '2006-01-02 15:04:05-07:00', 42), + (false, 'Wave', 'ZDF', 1.5, null, 11, 12, 13, 14, 15, 16, row(4, 'foo'), null, null, null); + +statement ok +flush; + +statement ok +create sink sink0 from into_kafka with ( + connector = 'kafka', + topic = 'test-rw-sink-append-only-protobuf', + properties.bootstrap.server = 'message_queue:29092') +format plain encode protobuf ( + force_append_only = true, + schema.location = 'file:///risingwave/proto-recursive', + message = 'recursive.AllTypes'); + +sleep 2s + +query TTTRRIIIIIITTTI +select + bool_field, + string_field, + bytes_field, + float_field, + double_field, + int32_field, + 
int64_field, + sint32_field, + sint64_field, + sfixed32_field, + sfixed64_field, + nested_message_field, + repeated_int_field, + timestamp_field, + oneof_int32 from from_kafka; +---- +t Rising \x6130 3.5 4.25 22 23 24 0 26 27 (1,) {4,0,4} (1136239445,0) 42 +f Wave \x5a4446 1.5 0 11 12 13 14 15 16 (4,foo) {} (0,0) 0 + +statement error failed to read file +create sink sink_err from into_kafka with ( + connector = 'kafka', + topic = 'test-rw-sink-append-only-protobuf', + properties.bootstrap.server = 'message_queue:29092') +format plain encode protobuf ( + force_append_only = true, + schema.location = 'file:///risingwave/proto-recursiv', + message = 'recursive.AllTypes'); + +statement error encode extra_column error: field not in proto +create sink sink_err as select 1 as extra_column with ( + connector = 'kafka', + topic = 'test-rw-sink-append-only-protobuf', + properties.bootstrap.server = 'message_queue:29092') +format plain encode protobuf ( + force_append_only = true, + schema.location = 'file:///risingwave/proto-recursive', + message = 'recursive.AllTypes'); + +statement error s3 URL not supported yet +create sink sink_err from into_kafka with ( + connector = 'kafka', + topic = 'test-rw-sink-append-only-protobuf', + properties.bootstrap.server = 'message_queue:29092') +format plain encode protobuf ( + force_append_only = true, + schema.location = 's3:///risingwave/proto-recursive', + message = 'recursive.AllTypes'); diff --git a/e2e_test/sink/kafka/upsert_schema1.result b/e2e_test/sink/kafka/upsert_schema1.result new file mode 100644 index 0000000000000..d33394de4e5b9 --- /dev/null +++ b/e2e_test/sink/kafka/upsert_schema1.result @@ -0,0 +1,10 @@ +{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/kafka/upsert_schema2.result b/e2e_test/sink/kafka/upsert_schema2.result new file mode 100644 index 0000000000000..e560df682107e --- /dev/null +++ b/e2e_test/sink/kafka/upsert_schema2.result @@ -0,0 +1,11 @@ +{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/kafka/upsert_schema3.result b/e2e_test/sink/kafka/upsert_schema3.result new file mode 100644 index 0000000000000..85c3db9768e78 --- /dev/null +++ b/e2e_test/sink/kafka/upsert_schema3.result @@ -0,0 +1,12 @@ +{"payload":{"id":10},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":10,"v_bigint":20674,"v_double":9042.404483827513,"v_float":19387.23828125,"v_integer":20674,"v_smallint":26951,"v_timestamp":1681404058888,"v_varchar":"0oVqRIHqkb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} null 
+{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":1,"v_bigint":0,"v_double":0.0,"v_float":0.0,"v_integer":0,"v_smallint":0,"v_timestamp":0,"v_varchar":""},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":1},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":1,"v_bigint":1872,"v_double":23956.39329760601,"v_float":26261.416015625,"v_integer":1872,"v_smallint":31031,"v_timestamp":1681453634104,"v_varchar":"8DfUFencLe"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":2},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":2,"v_bigint":4598,"v_double":31923.077305746086,"v_float":27031.224609375,"v_integer":4598,"v_smallint":22690,"v_timestamp":1681429444869,"v_varchar":"sIo1XXVeHZ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":3},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":3,"v_bigint":14894,"v_double":9742.475509566086,"v_float":2660.290283203125,"v_integer":5894,"v_smallint":5985,"v_timestamp":1681429011269,"v_varchar":"LVLAhd1pQv"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":4},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":4,"v_bigint":24962,"v_double":3119.719721891862,"v_float":21217.77734375,"v_integer":7406,"v_smallint":6306,"v_timestamp":1681434727993,"v_varchar":"ORjwy3oMNb"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":5},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":5,"v_bigint":9253,"v_double":17464.91553421121,"v_float":22749.5,"v_integer":9253,"v_smallint":22765,"v_timestamp":1681444642324,"v_varchar":"sSkKswxrYd"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":6},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":6,"v_bigint":28842,"v_double":11210.458724794062,"v_float":5885.3681640625,"v_integer":10844,"v_smallint":4014,"v_timestamp":1681382522137,"v_varchar":"V4y71v4Gip"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":7},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":7,"v_bigint":15914,"v_double":10967.182297153104,"v_float":3946.743408203125,"v_integer":12652,"v_smallint":10324,"v_timestamp":1681447263083,"v_varchar":"YIVLnWxHyf"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":8},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} 
{"payload":{"id":8,"v_bigint":28641,"v_double":993.408963466774,"v_float":13652.0732421875,"v_integer":19036,"v_smallint":194,"v_timestamp":1681393929356,"v_varchar":"lv7Eq3g8hx"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} +{"payload":{"id":9},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} {"payload":{"id":9,"v_bigint":24837,"v_double":11615.276406159757,"v_float":20699.55859375,"v_integer":20090,"v_smallint":10028,"v_timestamp":1681389642487,"v_varchar":"nwRq4zejSQ"},"schema":{"fields":[{"field":"id","optional":true,"type":"int32"},{"field":"v_varchar","optional":true,"type":"string"},{"field":"v_smallint","optional":true,"type":"int16"},{"field":"v_integer","optional":true,"type":"int32"},{"field":"v_bigint","optional":true,"type":"int64"},{"field":"v_float","optional":true,"type":"float"},{"field":"v_double","optional":true,"type":"double"},{"field":"v_timestamp","optional":true,"type":"int64"}],"name":"dev.t_kafka","optional":false,"type":"struct"}} diff --git a/e2e_test/sink/pulsar_sink.slt b/e2e_test/sink/pulsar_sink.slt new file mode 100644 index 0000000000000..944176857eca2 --- /dev/null +++ b/e2e_test/sink/pulsar_sink.slt @@ -0,0 +1,51 @@ +statement ok +CREATE TABLE pulsar ( + id BIGINT, + content VARCHAR, + PRIMARY KEY (id) +); + +statement ok +INSERT INTO pulsar VALUES (1, 'test1'); + +statement ok +INSERT INTO pulsar VALUES (2, 'test2'); + +statement ok +INSERT INTO pulsar VALUES (3, 'test3'); + +statement ok +CREATE SINK pulsar_sink 
FROM pulsar +WITH ( + connector='pulsar', + type='upsert', + primary_key='id', + topic='persistent://public/default/rw', + service.url='pulsar://pulsar:6650', +); + +statement ok +CREATE TABLE pulsar_source ( + id BIGINT, + content VARCHAR +) +WITH ( + connector='pulsar', + topic='persistent://public/default/rw', + service.url='pulsar://pulsar:6650', + scan.startup.mode='earliest', +) FORMAT PLAIN ENCODE JSON; + +statement ok +FLUSH; + +# We can't control Sink -> Source Table +# latency. We just have to wait. +sleep 5s + +query IT rowsort +SELECT * FROM pulsar_source; +---- +1 test1 +2 test2 +3 test3 \ No newline at end of file diff --git a/e2e_test/sink/remote/jdbc.check.pg.slt b/e2e_test/sink/remote/jdbc.check.pg.slt index 82d87571b35ac..6293c44a5a444 100644 --- a/e2e_test/sink/remote/jdbc.check.pg.slt +++ b/e2e_test/sink/remote/jdbc.check.pg.slt @@ -39,3 +39,8 @@ select * from biz.t2 order by "aBc"; ---- 1 2 + +query IT +select * from t1_uuid; +---- +221 74605c5a-a7bb-4b3b-8742-2a12e9709dea hello world diff --git a/e2e_test/sink/remote/jdbc.load.slt b/e2e_test/sink/remote/jdbc.load.slt index 235348f5bd82c..70ad3f9a3a42b 100644 --- a/e2e_test/sink/remote/jdbc.load.slt +++ b/e2e_test/sink/remote/jdbc.load.slt @@ -144,6 +144,22 @@ CREATE SINK s_pg_t2 FROM tt2 WITH ( force_append_only=true ); + +statement ok +create table t1_uuid (v1 int primary key, v2 varchar, v3 varchar); + +statement ok +CREATE SINK s1_uuid FROM t1_uuid WITH ( + connector='jdbc', + jdbc.url='jdbc:postgresql://db:5432/test?user=test&password=connector', + table.name='t1_uuid', + primary_key='v1', + type='upsert' +); + +statement ok +INSERT INTO t1_uuid values (221, '74605c5a-a7bb-4b3b-8742-2a12e9709dea', 'hello world'); + statement ok INSERT INTO tt2 VALUES (1), diff --git a/e2e_test/sink/remote/pg_create_table.sql b/e2e_test/sink/remote/pg_create_table.sql index c899b788e3506..fd06aca93ce7b 100644 --- a/e2e_test/sink/remote/pg_create_table.sql +++ b/e2e_test/sink/remote/pg_create_table.sql @@ 
-53,6 +53,8 @@ CREATE TABLE t_types ( jsonb_column JSONB ); +CREATE TABLE t1_uuid (v1 int primary key, v2 uuid, v3 varchar); + CREATE SCHEMA biz; CREATE TABLE biz.t_types ( id BIGINT PRIMARY KEY, diff --git a/e2e_test/source/basic/alter/kafka.slt b/e2e_test/source/basic/alter/kafka.slt index 6e2b7b88d2727..7b355f6407e52 100644 --- a/e2e_test/source/basic/alter/kafka.slt +++ b/e2e_test/source/basic/alter/kafka.slt @@ -14,13 +14,22 @@ CREATE SOURCE s2 (v2 varchar) with ( scan.startup.mode = 'earliest' ) FORMAT PLAIN ENCODE JSON; +statement ok +CREATE TABLE t (v1 int) with ( + connector = 'kafka', + topic = 'kafka_alter', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) FORMAT PLAIN ENCODE JSON; + + statement ok create materialized view mv1 as select * from s1; statement ok create materialized view mv2 as select * from s2; -sleep 10s +sleep 5s statement ok flush; @@ -35,6 +44,11 @@ select * from s2; ---- 11 +query I +select * from t; +---- +1 + # alter source statement ok alter source s1 add column v2 varchar; @@ -49,7 +63,10 @@ create materialized view mv3 as select * from s1; statement ok create materialized view mv4 as select * from s2; -sleep 10s +statement ok +alter table t add column v2 varchar; + +sleep 5s statement ok flush; @@ -84,6 +101,11 @@ select * from mv4 ---- 11 NULL +query IT +select * from t +---- +1 NULL + # alter source again statement ok alter source s1 add column v3 int; @@ -91,7 +113,7 @@ alter source s1 add column v3 int; statement ok create materialized view mv5 as select * from s1; -sleep 10s +sleep 5s statement ok flush; diff --git a/e2e_test/source/basic/alter/kafka_after_new_data.slt b/e2e_test/source/basic/alter/kafka_after_new_data.slt index 2b0ab659766e9..5a73b749079f9 100644 --- a/e2e_test/source/basic/alter/kafka_after_new_data.slt +++ b/e2e_test/source/basic/alter/kafka_after_new_data.slt @@ -45,6 +45,21 @@ select * from mv5 1 11 111 2 22 222 +query IT rowsort +select * from t +---- +1 NULL 
+2 22 + +statement ok +alter table t add column v3 int; + +query IT rowsort +select * from t +---- +1 NULL NULL +2 22 NULL + statement ok drop materialized view mv1 diff --git a/e2e_test/source/basic/alter/kafka_after_new_data_2.slt b/e2e_test/source/basic/alter/kafka_after_new_data_2.slt new file mode 100644 index 0000000000000..c10634d259138 --- /dev/null +++ b/e2e_test/source/basic/alter/kafka_after_new_data_2.slt @@ -0,0 +1,14 @@ +sleep 5s + +statement ok +flush; + +query IT rowsort +select * from t +---- +1 NULL NULL +2 22 NULL +3 33 333 + +statement ok +drop table t; \ No newline at end of file diff --git a/e2e_test/source/basic/kafka_batch.slt b/e2e_test/source/basic/kafka_batch.slt index 8d8d454c7c977..a1b7690a36c45 100644 --- a/e2e_test/source/basic/kafka_batch.slt +++ b/e2e_test/source/basic/kafka_batch.slt @@ -92,6 +92,28 @@ select * from s1 where _rw_kafka_timestamp > '1977-01-01 00:00:00+00:00' 3 333 4 4444 +query IT rowsort +select * from s1 where _rw_kafka_timestamp > '1977-01-01 00:00:00' +---- +1 1 +2 22 +3 333 +4 4444 + +query IT rowsort +select * from s1 where _rw_kafka_timestamp > TO_TIMESTAMP('1977-01-01 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US') +---- +1 1 +2 22 +3 333 +4 4444 + +statement error expected format +select * from s1 where _rw_kafka_timestamp > 'abc' + +statement error out of range +select * from s1 where _rw_kafka_timestamp < TO_TIMESTAMP(2147483647 + 1) + query IT select * from s1 where _rw_kafka_timestamp > '2045-01-01 0:00:00+00:00' ---- diff --git a/e2e_test/source/cdc/cdc.validate.postgres.slt b/e2e_test/source/cdc/cdc.validate.postgres.slt index 15fce0b554dc9..4ac6669913c58 100644 --- a/e2e_test/source/cdc/cdc.validate.postgres.slt +++ b/e2e_test/source/cdc/cdc.validate.postgres.slt @@ -125,3 +125,45 @@ create table shipments ( table.name = 'shipments', slot.name = 'shipments' ); + +# format & encode provided and match with debezium json, this is okay +statement ok +explain create table shipments ( + shipment_id INTEGER, 
+ order_id INTEGER, + origin STRING, + destination STRING, + is_arrived boolean, + PRIMARY KEY (shipment_id) +) with ( + connector = 'postgres-cdc', + hostname = 'db', + port = '5432', + username = 'postgres', + password = 'postgres', + database.name = 'cdc_test', + schema.name = 'public', + table.name = 'shipments', + slot.name = 'shipments' +) format debezium encode json; + +# format & encode provided but mismatch with debezium json, this is not allowed +statement error Row format for CDC connectors should be either omitted or set to `FORMAT DEBEZIUM ENCODE JSON` +create table shipments ( + shipment_id INTEGER, + order_id real, + origin STRING, + destination STRING, + is_arrived boolean, + PRIMARY KEY (shipment_id) +) with ( + connector = 'postgres-cdc', + hostname = 'db', + port = '5432', + username = 'postgres', + password = 'postgres', + database.name = 'cdc_test', + schema.name = 'public', + table.name = 'shipments', + slot.name = 'shipments' +) format canal encode csv; diff --git a/e2e_test/source/cdc/mysql_cdc.sql b/e2e_test/source/cdc/mysql_cdc.sql index 89e5274ac3ee1..81d7a46f17876 100644 --- a/e2e_test/source/cdc/mysql_cdc.sql +++ b/e2e_test/source/cdc/mysql_cdc.sql @@ -1,4 +1,4 @@ -DROP DATABASE IF EXISTS mydb; +DROP DATABASE IF EXISTS `my@db`; CREATE DATABASE `my@db`; USE `my@db`; diff --git a/e2e_test/source/cdc_inline/alter/postgres_alter.slt b/e2e_test/source/cdc_inline/alter/postgres_alter.slt new file mode 100644 index 0000000000000..807b6a152af54 --- /dev/null +++ b/e2e_test/source/cdc_inline/alter/postgres_alter.slt @@ -0,0 +1,92 @@ +control substitution on + +system ok +psql -c " +CREATE TABLE alter_test (k BIGINT PRIMARY KEY, v CHARACTER VARYING); +INSERT INTO alter_test VALUES (1, 'a'), (2, 'b'); +" + +statement ok +CREATE TABLE alter_test (k BIGINT PRIMARY KEY, v CHARACTER VARYING) +WITH ( + connector = 'postgres-cdc', + hostname = '${PGHOST:localhost}', + port = '${PGPORT:5432}', + username = '${PGUSER:$USER}', + password = 
'${PGPASSWORD:}', + database.name = '${PGDATABASE:postgres}', + schema.name = 'public', + table.name = 'alter_test', + slot.name = 'alter_test' +) + +sleep 5s + +query IT +SELECT * FROM alter_test ORDER BY k +---- +1 a +2 b + +statement ok +ALTER TABLE alter_test ADD COLUMN v2 CHARACTER VARYING + +system ok +psql -c " +INSERT INTO alter_test VALUES (3, 'c'); +" + +# FIXME: after schema change in RisingWave, why does it take so long to get the new data? +sleep 20s + +query ITT +SELECT * FROM alter_test ORDER BY k +---- +1 a NULL +2 b NULL +3 c NULL + +system ok +psql -c " +ALTER TABLE alter_test ADD COLUMN v2 CHARACTER VARYING; +INSERT INTO alter_test VALUES (4, 'd', 'dd'); +" + +sleep 5s + +query ITT +SELECT * FROM alter_test ORDER BY k +---- +1 a NULL +2 b NULL +3 c NULL +4 d dd + +statement ok +ALTER TABLE alter_test DROP COLUMN v + +system ok +psql -c " +ALTER TABLE alter_test DROP COLUMN v; +INSERT INTO alter_test VALUES (5, 'ee'); +" + +# FIXME: after schema change in RisingWave, why does it take so long to get the new data? 
+sleep 20s + +query IT +SELECT * FROM alter_test ORDER BY k +---- +1 NULL +2 NULL +3 NULL +4 dd +5 ee + +statement ok +DROP TABLE alter_test + +system ok +psql -c " +DROP TABLE alter_test; +" diff --git a/e2e_test/streaming/aggregate/boolean.slt b/e2e_test/streaming/aggregate/boolean.slt new file mode 100644 index 0000000000000..86cf018fda4e1 --- /dev/null +++ b/e2e_test/streaming/aggregate/boolean.slt @@ -0,0 +1,67 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t (v boolean); + +statement ok +create materialized view mv as select + bool_and(v), + bool_or(v) +from t; + +query BB +select * from mv; +---- +NULL NULL + + +statement ok +insert into t values (true); + +# table values: true + +query BB +select * from mv; +---- +t t + + +statement ok +insert into t values (false); + +# table values: true, false + +query BB +select * from mv; +---- +f t + + +statement ok +delete from t where v = true; + +# table values: false + +query BB +select * from mv; +---- +f f + + +statement ok +delete from t; + +# table values: empty + +query BB +select * from mv; +---- +NULL NULL + + +statement ok +drop materialized view mv; + +statement ok +drop table t; diff --git a/e2e_test/streaming/aggregate/jsonb_agg.slt b/e2e_test/streaming/aggregate/jsonb_agg.slt new file mode 100644 index 0000000000000..18cb80cc69085 --- /dev/null +++ b/e2e_test/streaming/aggregate/jsonb_agg.slt @@ -0,0 +1,46 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t(v1 boolean, v2 int, v3 varchar, v4 jsonb); + +statement ok +create materialized view mv_tmp as +select jsonb_agg(v1) as j1 from t; + +statement ok +drop materialized view mv_tmp; + +statement ok +create materialized view mv1 as +select + jsonb_agg(v1 order by v2) as j1, + jsonb_agg(v2 order by v2) as j2, + jsonb_object_agg(v3, v4) as j3 +from t; + +statement ok +insert into t values + (null, 2, 'bbb', null), + (false, 1, 'ccc', 'null'); + +query TTT +select * from mv1; +---- +[false, null] 
[1, 2] {"bbb": null, "ccc": null} + +statement ok +insert into t values + (true, 0, 'bbb', '999'), + (true, 8, 'ddd', '{"foo": "bar"}'); + +query TTT +select * from mv1; +---- +[true, false, null, true] [0, 1, 2, 8] {"bbb": 999, "ccc": null, "ddd": {"foo": "bar"}} + +statement ok +drop materialized view mv1; + +statement ok +drop table t; diff --git a/e2e_test/streaming/bug_fixes/issue_12299.slt b/e2e_test/streaming/bug_fixes/issue_12299.slt new file mode 100644 index 0000000000000..7be47038f15cf --- /dev/null +++ b/e2e_test/streaming/bug_fixes/issue_12299.slt @@ -0,0 +1,31 @@ +# https://github.com/risingwavelabs/risingwave/issues/12299 +# TL;DR When upstream's stream key is not pk and the stream scan does not contain whole pk. + +statement ok +create table t1( + id bigint primary key, + i bigint +); + +statement ok +create materialized view mv1 as select id, i from t1 order by id, i; + +statement ok +insert into t1 values(1, 1); + +statement ok +create materialized view mv2 as select id from mv1; + +query I +select * from mv2; +---- +1 + +statement ok +drop materialized view mv2; + +statement ok +drop materialized view mv1; + +statement ok +drop table t1; \ No newline at end of file diff --git a/e2e_test/streaming/eowc/eowc_select.slt b/e2e_test/streaming/eowc/eowc_select.slt new file mode 100644 index 0000000000000..6af9b78028623 --- /dev/null +++ b/e2e_test/streaming/eowc/eowc_select.slt @@ -0,0 +1,56 @@ +statement ok +set RW_IMPLICIT_FLUSH to true; + +statement ok +set streaming_parallelism = 1; + +statement ok +create table t ( + tm timestamp, + foo int, + watermark for tm as tm - interval '5 minutes' +) append only; + +statement ok +set streaming_parallelism = 0; + +statement ok +create materialized view mv as +select tm, foo from t +emit on window close; + +statement ok +insert into t values + ('2023-05-06 16:51:00', 1) +, ('2023-05-06 16:56:00', 8) +, ('2023-05-06 17:30:00', 3) +, ('2023-05-06 17:59:00', 4) +, ('2023-05-06 18:01:00', 6) +; + +query TI 
+select * from mv order by tm; +---- +2023-05-06 16:51:00 1 +2023-05-06 16:56:00 8 +2023-05-06 17:30:00 3 + +statement ok +insert into t values + ('2023-05-06 18:10:00', 7) +; + +query TI +select * from mv order by tm; +---- +2023-05-06 16:51:00 1 +2023-05-06 16:56:00 8 +2023-05-06 17:30:00 3 +2023-05-06 17:59:00 4 +2023-05-06 18:01:00 6 + +statement ok +drop materialized view mv; + +statement ok +drop table t; diff --git a/e2e_test/streaming/group_top_n/group_top_1.slt b/e2e_test/streaming/group_top_n/group_top_1.slt new file mode 100644 index 0000000000000..2bed792a4ae82 --- /dev/null +++ b/e2e_test/streaming/group_top_n/group_top_1.slt @@ -0,0 +1,45 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +# https://github.com/risingwavelabs/risingwave/issues/12282 + +statement ok +create table t(a int, b int, c int); + +statement ok +create materialized view mv as SELECT * FROM ( + SELECT + *, + row_number() OVER (PARTITION BY a ORDER BY b) AS rank + FROM t +) WHERE rank <= 1; + +statement ok +insert into t values (1, 1, 1); + +query iiiI +select * from mv; +---- +1 1 1 1 + +statement ok +insert into t values (1, 0, 1); + +query iiiI +select * from mv; +---- +1 0 1 1 + +statement ok +insert into t values (1, 0, 1); + +query iiiI +select * from mv; +---- +1 0 1 1 + +statement ok +drop materialized view mv; + +statement ok +drop table t; diff --git a/e2e_test/streaming/group_top_n.slt b/e2e_test/streaming/group_top_n/main1.slt similarity index 100% rename from e2e_test/streaming/group_top_n.slt rename to e2e_test/streaming/group_top_n/main1.slt diff --git a/e2e_test/streaming/non_strict_mode.slt b/e2e_test/streaming/non_strict_mode.slt index 3c026d3232aff..3dc62ac27eec8 100644 --- a/e2e_test/streaming/non_strict_mode.slt +++ b/e2e_test/streaming/non_strict_mode.slt @@ -1,5 +1,8 @@ # Test compute errors are replaced with NULLs. # See also +# +# UPDATE: after , the non-strict +# NULL padding is applied for each expression node instead of the root node only. 
statement ok create table t(x int); @@ -8,46 +11,37 @@ statement ok insert into t values (0),(1),(2),(NULL); statement ok -create materialized view mv_proj as select 10/x as v from t; +create materialized view mv_proj as select x, 10/x as v from t; statement ok -create materialized view mv_proj_is_null as select 10/x is null as v from t; +create materialized view mv_proj_is_null as select x, 10/x is null as v from t; -query I rowsort -select * from mv_proj; +query I +select * from mv_proj order by x; ---- -10 -5 -NULL -NULL - -# result for 0 is NULL, instead of true -query B rowsort -select * from mv_proj_is_null; +0 NULL +1 10 +2 5 +NULL NULL + +# 10/0 fails, which is replaced with NULL, then NULL `is null` +query T +select * from mv_proj_is_null order by x; ---- -NULL -f -f -t +0 t +1 f +2 f +NULL t statement ok create materialized view mv_filter as select * from t where 10/x > 0; -statement ok -create materialized view mv_filter_is_null as select * from t where 10/x > 0 is null; - query I rowsort select * from mv_filter; ---- 1 2 -# result for 0 is not included -query I rowsort -select * from mv_filter_is_null; ----- -NULL - statement ok drop materialized view mv_proj; @@ -58,7 +52,27 @@ statement ok drop materialized view mv_filter; statement ok -drop materialized view mv_filter_is_null; +drop table t; + +statement ok +create table t(x varchar); + +statement ok +insert into t values ('two'), ('4'); + +statement ok +create materialized view mv_coalesce as select coalesce(x::int, 0) as v from t; + +# convert 'two' to int fails, which is replaced with NULL, then coalesced to 0 +# https://github.com/risingwavelabs/risingwave/issues/11586 +query I rowsort +select * from mv_coalesce; +---- +0 +4 + +statement ok +drop materialized view mv_coalesce; statement ok drop table t; diff --git a/e2e_test/streaming/project_set.slt b/e2e_test/streaming/project_set.slt index 959c75ebebefc..f360663067e3e 100644 --- a/e2e_test/streaming/project_set.slt +++ 
b/e2e_test/streaming/project_set.slt @@ -107,3 +107,38 @@ with cte as (SELECT 1 as v1, unnest(array[1,2,3,4,5]) AS v2) select v1 from cte; 1 1 1 + +statement ok +create table t(arr varchar[]); + +statement ok +create materialized view mv as select * from t cross join unnest(t.arr) WITH ORDINALITY AS x(elts, num); + +statement ok +insert into t values (Array['a','b', 'c']), (Array['d','e']); + +query I rowsort +select * from mv; +---- +{a,b,c} a 1 +{a,b,c} b 2 +{a,b,c} c 3 +{d,e} d 1 +{d,e} e 2 + +statement ok +update t set arr = Array['a', 'c'] where arr = Array['a','b', 'c']; + +query I rowsort +select * from mv; +---- +{a,c} a 1 +{a,c} c 2 +{d,e} d 1 +{d,e} e 2 + +statement ok +drop materialized view mv; + +statement ok +drop table t; diff --git a/e2e_test/streaming/rate_limit.slt b/e2e_test/streaming/rate_limit.slt index 95e87b9fd681c..539a9736754cf 100644 --- a/e2e_test/streaming/rate_limit.slt +++ b/e2e_test/streaming/rate_limit.slt @@ -2,7 +2,7 @@ statement ok CREATE TABLE t1(v1 int, v2 int); statement ok -SET RW_STREAMING_RATE_LIMIT TO 10000; +SET STREAMING_RATE_LIMIT TO 10000; statement ok CREATE MATERIALIZED VIEW m AS SELECT * FROM t1; diff --git a/e2e_test/streaming/temporal_join/temporal_join_watermark.slt b/e2e_test/streaming/temporal_join/temporal_join_watermark.slt new file mode 100644 index 0000000000000..9479ca6727299 --- /dev/null +++ b/e2e_test/streaming/temporal_join/temporal_join_watermark.slt @@ -0,0 +1,73 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table stream(id1 int, a1 int, b1 int, v1 timestamp with time zone, watermark for v1 as v1 - INTERVAL '10' SECOND) APPEND ONLY; + +# FIXME. If we don't insert at first, it would cause a panic when create eowc_mv. 
+statement ok +insert into stream values (1, 1, 1, '2023-09-14 06:00:00'); + +statement ok +create table version(id2 int, a2 int, b2 int, primary key (id2)); + +statement ok +create materialized view temporal_join_mv as select id1, a1, id2, v1 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on id1 = id2; + +statement ok +create materialized view eowc_mv as select window_start, count(id1) from tumble(temporal_join_mv, v1, interval '5 s') group by window_start emit on window close; + +query IIII rowsort +select * from temporal_join_mv; +---- +1 1 NULL 2023-09-14 06:00:00+00:00 + +skipif in-memory +query IIII rowsort +select * from eowc_mv; +---- + +statement ok +insert into stream values (2, 2, 2, '2023-09-14 06:00:25'); + +sleep 10s + +query IIII rowsort +select * from temporal_join_mv; +---- +1 1 NULL 2023-09-14 06:00:00+00:00 +2 2 NULL 2023-09-14 06:00:25+00:00 + +skipif in-memory +query IIII rowsort +select * from eowc_mv; +---- +2023-09-14 06:00:00+00:00 1 + +statement ok +insert into stream values (3, 3, 3, '2023-09-14 06:00:45'); + +sleep 10s + +query IIII rowsort +select * from temporal_join_mv; +---- +1 1 NULL 2023-09-14 06:00:00+00:00 +2 2 NULL 2023-09-14 06:00:25+00:00 +3 3 NULL 2023-09-14 06:00:45+00:00 + +skipif in-memory +query IIII rowsort +select * from eowc_mv; +---- +2023-09-14 06:00:00+00:00 1 +2023-09-14 06:00:25+00:00 1 + +statement ok +drop table stream cascade; + +statement ok +drop table version cascade; + + + diff --git a/e2e_test/streaming/watermark.slt b/e2e_test/streaming/watermark.slt index f145a3a46e282..d1a29b88188cf 100644 --- a/e2e_test/streaming/watermark.slt +++ b/e2e_test/streaming/watermark.slt @@ -21,11 +21,11 @@ statement ok insert into t values ('2023-05-06 16:56:01', 1); skipif in-memory -sleep 5s +sleep 10s skipif in-memory query TI -select * from mv1; +select * from mv1 order by 2; ---- 2023-05-06 16:51:00 1 2023-05-06 16:51:00 2 @@ -33,16 +33,16 @@ select * from mv1; skipif in-memory query TI -select * from 
mv2; +select * from mv2 order by 2; ---- 2023-05-06 16:51:00 1 -2023-05-06 16:51:00 2 -2023-05-06 16:51:00 3 2023-05-06 16:51:00 1 -2023-05-06 16:51:00 2 -2023-05-06 16:51:00 3 2023-05-06 16:51:00 1 2023-05-06 16:51:00 2 +2023-05-06 16:51:00 2 +2023-05-06 16:51:00 2 +2023-05-06 16:51:00 3 +2023-05-06 16:51:00 3 2023-05-06 16:51:00 3 statement ok diff --git a/e2e_test/udf/test.py b/e2e_test/udf/test.py index ed1f49e7d4dc5..c34c65e3c232c 100644 --- a/e2e_test/udf/test.py +++ b/e2e_test/udf/test.py @@ -1,3 +1,17 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import socket import struct import sys diff --git a/e2e_test/udf/udf.slt b/e2e_test/udf/udf.slt index 33579a825832e..aebfd3d33e697 100644 --- a/e2e_test/udf/udf.slt +++ b/e2e_test/udf/udf.slt @@ -71,20 +71,20 @@ as return_all_arrays using link 'http://localhost:8815'; query TTTTT rowsort show functions ---- -array_access varchar[], integer varchar (empty) http://localhost:8815 -extract_tcp_info bytea struct (empty) http://localhost:8815 +array_access character varying[], integer character varying (empty) http://localhost:8815 +extract_tcp_info bytea struct (empty) http://localhost:8815 gcd integer, integer integer (empty) http://localhost:8815 gcd integer, integer, integer integer (empty) http://localhost:8815 -hex_to_dec varchar numeric (empty) http://localhost:8815 +hex_to_dec character varying numeric (empty) http://localhost:8815 int_42 (empty) integer (empty) http://localhost:8815 jsonb_access jsonb, integer jsonb (empty) http://localhost:8815 jsonb_array_identity jsonb[] jsonb[] (empty) http://localhost:8815 jsonb_array_struct_identity struct struct (empty) http://localhost:8815 jsonb_concat jsonb[] jsonb (empty) http://localhost:8815 -return_all boolean, smallint, integer, bigint, real, double precision, numeric, date, time without time zone, timestamp without time zone, interval, varchar, bytea, jsonb struct (empty) http://localhost:8815 -return_all_arrays boolean[], smallint[], integer[], bigint[], real[], double precision[], numeric[], date[], time without time zone[], timestamp without time zone[], interval[], varchar[], bytea[], jsonb[] struct (empty) http://localhost:8815 +return_all boolean, smallint, integer, bigint, real, double precision, numeric, date, time without time zone, timestamp without time zone, interval, character varying, bytea, jsonb struct (empty) http://localhost:8815 +return_all_arrays boolean[], smallint[], integer[], bigint[], real[], double precision[], numeric[], date[], time without time zone[], timestamp without time zone[], 
interval[], character varying[], bytea[], jsonb[] struct (empty) http://localhost:8815 series integer integer (empty) http://localhost:8815 -split varchar struct (empty) http://localhost:8815 +split character varying struct (empty) http://localhost:8815 query I select int_42(); @@ -224,6 +224,35 @@ select (extract_tcp_info(E'\\x45000034a8a8400040065b8ac0a8000ec0a80001035d20b6d9 ---- 192.168.0.14 192.168.0.1 861 8374 +# steaming +# to ensure UDF & UDTF respect visibility + +statement ok +create table t (x int); + +statement ok +create materialized view mv as select gcd(x, x), series(x) from t where x <> 2; + +statement ok +insert into t values (1), (2), (3); + +statement ok +flush; + +query II +select * from mv; +---- +1 0 +3 0 +3 1 +3 2 + +statement ok +drop materialized view mv; + +statement ok +drop table t; + # error handling statement error diff --git a/grafana/risingwave-dev-dashboard.dashboard.py b/grafana/risingwave-dev-dashboard.dashboard.py index 6fa036b257f09..e22837410866b 100644 --- a/grafana/risingwave-dev-dashboard.dashboard.py +++ b/grafana/risingwave-dev-dashboard.dashboard.py @@ -1,6 +1,7 @@ import os import logging import sys + p = os.path.dirname(__file__) sys.path.append(p) from common import * @@ -25,14 +26,14 @@ def section_actor_info(outer_panels): return [ outer_panels.row_collapsed( "Actor/Table Id Info", - [ - panels.table_info("Actor Id Info", - "Mapping from actor id to fragment id", - [panels.table_target(f"{metric('actor_info')}")], excluded_cols), - panels.table_info("Materialized View Info", - "Mapping from materialized view table id to it's internal table ids", - [panels.table_target(f"{metric('table_info')}")], excluded_cols), - ]) + [ + panels.table_info("Actor Id Info", + "Mapping from actor id to fragment id", + [panels.table_target(f"{metric('actor_info')}")], excluded_cols), + panels.table_info("Materialized View Info", + "Mapping from materialized view table id to it's internal table ids", + 
[panels.table_target(f"{metric('table_info')}")], excluded_cols), + ]) ] @@ -41,53 +42,53 @@ def section_cluster_node(outer_panels): return [ outer_panels.row_collapsed( "Cluster Node", - [ - panels.timeseries_count( - "Node Count", - "The number of each type of RisingWave components alive.", - [ - panels.target(f"sum({metric('worker_num')}) by (worker_type)", - "{{worker_type}}") - ], - ["last"], - ), - panels.timeseries_memory( - "Node Memory", - "The memory usage of each RisingWave component.", - [ - panels.target( - f"avg({metric('process_resident_memory_bytes')}) by (job,instance)", - "{{job}} @ {{instance}}", - ) - ], - ), - panels.timeseries_cpu( - "Node CPU", - "The CPU usage of each RisingWave component.", - [ - panels.target( - f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance)", - "cpu usage (total) - {{job}} @ {{instance}}", - ), + [ + panels.timeseries_count( + "Node Count", + "The number of each type of RisingWave components alive.", + [ + panels.target(f"sum({metric('worker_num')}) by (worker_type)", + "{{worker_type}}") + ], + ["last"], + ), + panels.timeseries_memory( + "Node Memory", + "The memory usage of each RisingWave component.", + [ + panels.target( + f"avg({metric('process_resident_memory_bytes')}) by (job,instance)", + "{{job}} @ {{instance}}", + ) + ], + ), + panels.timeseries_cpu( + "Node CPU", + "The CPU usage of each RisingWave component.", + [ + panels.target( + f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance)", + "cpu usage (total) - {{job}} @ {{instance}}", + ), - panels.target( - f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance) / avg({metric('process_cpu_core_num')}) by (job,instance)", - "cpu usage (avg per core) - {{job}} @ {{instance}}", - ), - ], - ), + panels.target( + f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance) / avg({metric('process_cpu_core_num')}) by (job,instance)", + 
"cpu usage (avg per core) - {{job}} @ {{instance}}", + ), + ], + ), - panels.timeseries_count( - "Meta Cluster", - "RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the " - "rest are the followers.", - [ - panels.target(f"sum({metric('meta_num')}) by (worker_addr,role)", - "{{worker_addr}} @ {{role}}") - ], - ["last"], - ), - ]) + panels.timeseries_count( + "Meta Cluster", + "RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the " + "rest are the followers.", + [ + panels.target(f"sum({metric('meta_num')}) by (worker_addr,role)", + "{{worker_addr}} @ {{role}}") + ], + ["last"], + ), + ]) ] @@ -96,45 +97,45 @@ def section_recovery_node(outer_panels): return [ outer_panels.row_collapsed( "Recovery", - [ - panels.timeseries_ops( - "Recovery Successful Rate", - "The rate of successful recovery attempts", - [ - panels.target(f"sum(rate({metric('recovery_latency_count')}[$__rate_interval])) by (instance)", - "{{instance}}") - ], - ["last"], - ), - panels.timeseries_count( - "Failed recovery attempts", - "Total number of failed reocovery attempts", - [ - panels.target(f"sum({metric('recovery_failure_cnt')}) by (instance)", - "{{instance}}") - ], - ["last"], - ), - panels.timeseries_latency( - "Recovery latency", - "Time spent in a successful recovery attempt", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('recovery_latency_bucket')}[$__rate_interval])) by (le, instance))", - f"recovery latency p{legend}" + - " - {{instance}}", + [ + panels.timeseries_ops( + "Recovery Successful Rate", + "The rate of successful recovery attempts", + [ + panels.target(f"sum(rate({metric('recovery_latency_count')}[$__rate_interval])) by (instance)", + "{{instance}}") + ], + ["last"], + ), + panels.timeseries_count( + "Failed recovery attempts", + "Total number of failed reocovery attempts", + [ + 
panels.target(f"sum({metric('recovery_failure_cnt')}) by (instance)", + "{{instance}}") + ], + ["last"], + ), + panels.timeseries_latency( + "Recovery latency", + "Time spent in a successful recovery attempt", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('recovery_latency_bucket')}[$__rate_interval])) by (le, instance))", + f"recovery latency p{legend}" + + " - {{instance}}", + ), + [50, 99, "max"], ), - [50, 99, "max"], - ), - panels.target( - f"sum by (le) (rate({metric('recovery_latency_sum')}[$__rate_interval])) / sum by (le) (rate({metric('recovery_latency_count')}[$__rate_interval]))", - "recovery latency avg", - ), - ], - ["last"], - ) - ]) + panels.target( + f"sum by (le) (rate({metric('recovery_latency_sum')}[$__rate_interval])) / sum by (le) (rate({metric('recovery_latency_count')}[$__rate_interval]))", + "recovery latency avg", + ), + ], + ["last"], + ) + ]) ] @@ -238,7 +239,7 @@ def section_compaction(outer_panels): f"histogram_quantile({quantile}, sum(rate({metric('storage_compact_task_size_bucket')}[$__rate_interval])) by (le, group, type))", f"p{legend}" + " - cg{{group}}@{{type}}", - ), + ), [90, "max"], ), ], @@ -308,17 +309,21 @@ def section_compaction(outer_panels): "KBs read from next level during history compactions to next level", [ panels.target( - f"sum(rate({metric('storage_level_compact_read_next')}[$__rate_interval])) by(job,instance) + sum(rate(" - f"{metric('storage_level_compact_read_curr')}[$__rate_interval])) by(job,instance)", - "read - {{job}} @ {{instance}}", + f"sum(rate({metric('storage_level_compact_read_next')}[$__rate_interval])) by(job) + sum(rate(" + f"{metric('storage_level_compact_read_curr')}[$__rate_interval])) by(job)", + "read - {{job}}", ), panels.target( - f"sum(rate({metric('storage_level_compact_write')}[$__rate_interval])) by(job,instance)", - "write - {{job}} @ {{instance}}", + f"sum(rate({metric('storage_level_compact_write')}[$__rate_interval])) 
by(job)", + "write - {{job}}", + ), + panels.target( + f"sum(rate({metric('compactor_write_build_l0_bytes')}[$__rate_interval]))by (job)", + "flush - {{job}}", ), panels.target( - f"sum(rate({metric('compactor_write_build_l0_bytes')}[$__rate_interval]))by (job,instance)", - "flush - {{job}} @ {{instance}}", + f"sum(rate({metric('compactor_fast_compact_bytes')}[$__rate_interval]))by (job)", + "fast compact - {{job}}", ), ], ), @@ -636,459 +641,479 @@ def section_object_storage(outer_panels): def section_streaming(outer_panels): panels = outer_panels.sub_panel() - sink_filter = "executor_identity=~\".*SinkExecutor.*\"" - mv_filter = "executor_identity=~\".*MaterializeExecutor.*\"" - table_type_filter = "table_type=~\"MATERIALIZED_VIEW\"" - mv_throughput_query = f'sum(rate({metric("stream_executor_row_count", filter=mv_filter)}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group({metric("table_info", filter=table_type_filter)}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)' - sink_throughput_query = f'sum(rate({metric("stream_executor_row_count", filter=sink_filter)}[$__rate_interval]) * on(actor_id) group_left(sink_name) (group({metric("sink_info")}) by (actor_id, sink_name))) by (sink_name)' return [ outer_panels.row_collapsed( "Streaming", - [ - panels.timeseries_rowsps( - "Source Throughput(rows/s)", - "The figure shows the number of rows read by each source per second.", - [ - panels.target( - f"rate({metric('stream_source_output_rows_counts')}[$__rate_interval])", - "source={{source_name}} actor={{actor_id}} @ {{instance}}", - ), - ], - ), - panels.timeseries_rowsps( - "Source Throughput(rows/s) Per Partition", - "Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of " - "each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(rows).", - [ - panels.target( - f"rate({metric('partition_input_count')}[$__rate_interval])", - "actor={{actor_id}} source={{source_id}} partition={{partition}}", - ) - ], - ), - panels.timeseries_bytesps( - "Source Throughput(MB/s)", - "The figure shows the number of bytes read by each source per second.", - [ - panels.target( - f"(sum by (source_id)(rate({metric('partition_input_bytes')}[$__rate_interval])))/(1000*1000)", - "source={{source_id}}", - ) - ], - ), - panels.timeseries_bytesps( - "Source Throughput(MB/s) Per Partition", - "Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of " - "each parallelism. The throughput of all the parallelism added up is equal to Source Throughput(MB/s).", - [ - panels.target( - f"(rate({metric('partition_input_bytes')}[$__rate_interval]))/(1000*1000)", - "actor={{actor_id}} source={{source_id}} partition={{partition}}", - ) - ], - ), - panels.timeseries_rowsps( - "Source Throughput(rows) per barrier", - "RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of " - "barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two " - "consecutive barriers.", - [ - panels.target( - f"rate({metric('stream_source_rows_per_barrier_counts')}[$__rate_interval])", - "actor={{actor_id}} source={{source_id}} @ {{instance}}" - ) - ] - ), - panels.timeseries_count( - "Source Upstream Status", - "Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is ready.", - [ - panels.target( - f"{metric('source_status_is_up')}", - "source_id={{source_id}}, source_name={{source_name}} @ {{instance}}" - ) - ] - ), - panels.timeseries_ops( - "Source Split Change Events frequency(events/s)", - "Source Split Change Events frequency by source_id and actor_id", - [ - panels.target( - f"rate({metric('stream_source_split_change_event_count')}[$__rate_interval])", - "source={{source_name}} actor={{actor_id}} @ {{instance}}" - ) - ] - ), - panels.timeseries_count( - "Kafka Consumer Lag Size", - "Kafka Consumer Lag Size by source_id, partition and actor_id", - [ - panels.target( - f"{metric('high_watermark')}", - "source={{source_id}} partition={{partition}}" - ), - panels.target( - f"{metric('latest_message_id')}", - "source={{source_id}} partition={{partition}} actor_id={{actor_id}}" - ) - ] - ), - panels.timeseries_rowsps( - "Sink Throughput(rows/s)", - "The figure shows the number of rows output by each sink per second.", - [ - panels.target( - sink_throughput_query, - "sink {{sink_name}}", - ), - ], - ), - - panels.timeseries_rowsps( - "Materialized View Throughput(rows/s)", - "The figure shows the number of rows written into each materialized view per second.", - [ - panels.target( - mv_throughput_query, - "materialized view {{table_name}} table_id {{materialized_view_id}}", - ), - ], - ), - panels.timeseries_rowsps( - "Backfill Snapshot Read Throughput(rows)", - "Total number of rows that have been read from the backfill snapshot", - [ - panels.target( - 
f"rate({table_metric('stream_backfill_snapshot_read_row_count')}[$__rate_interval])", - "table_id={{table_id}} actor={{actor_id}} @ {{instance}}" - ), - ], - ), - panels.timeseries_rowsps( - "Backfill Upstream Throughput(rows)", - "Total number of rows that have been output from the backfill upstream", - [ - panels.target( - f"rate({table_metric('stream_backfill_upstream_output_row_count')}[$__rate_interval])", - "table_id={{table_id}} actor={{actor_id}} @ {{instance}}" - ), - ], - ), - panels.timeseries_count( - "Barrier Number", - "The number of barriers that have been ingested but not completely processed. This metric reflects the " - "current level of congestion within the system.", - [ - panels.target(f"{metric('all_barrier_nums')}", "all_barrier"), - panels.target( - f"{metric('in_flight_barrier_nums')}", "in_flight_barrier"), - ], - ), - panels.timeseries_latency( - "Barrier Send Latency", - "The duration between the time point when the scheduled barrier needs to be sent and the time point when " - "the barrier gets actually sent to all the compute nodes. Developers can thus detect any internal " - "congestion.", - quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('meta_barrier_send_duration_seconds_bucket')}[$__rate_interval])) by (le))", - f"barrier_send_latency_p{legend}", - ), - [50, 90, 99, 999, "max"], - ) + [ - panels.target( - f"rate({metric('meta_barrier_send_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_send_duration_seconds_count')}[$__rate_interval])", - "barrier_send_latency_avg", - ), - ], - ), - panels.timeseries_latency( - "Barrier Latency", - "The time that the data between two consecutive barriers gets fully processed, i.e. the computation " - "results are made durable into materialized views or sink to external systems. 
This metric shows to users " - "the freshness of materialized views.", - quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('meta_barrier_duration_seconds_bucket')}[$__rate_interval])) by (le))", - f"barrier_latency_p{legend}", - ), - [50, 90, 99, 999, "max"], - ) + [ - panels.target( - f"rate({metric('meta_barrier_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_duration_seconds_count')}[$__rate_interval])", - "barrier_latency_avg", - ), - ], - ), - panels.timeseries_latency( - "Barrier In-Flight Latency", - "", - quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('stream_barrier_inflight_duration_seconds_bucket')}[$__rate_interval])) by (le))", - f"barrier_inflight_latency_p{legend}", - ), - [50, 90, 99, 999, "max"], - ) + [ - panels.target( - f"max(sum by(le, instance)(rate({metric('stream_barrier_inflight_duration_seconds_sum')}[$__rate_interval])) / sum by(le, instance)(rate({metric('stream_barrier_inflight_duration_seconds_count')}[$__rate_interval])))", - "barrier_inflight_latency_avg", - ), - ], - ), - panels.timeseries_latency( - "Barrier Sync Latency", - "", - quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('stream_barrier_sync_storage_duration_seconds_bucket')}[$__rate_interval])) by (le,instance))", - f"barrier_sync_latency_p{legend}" + " - {{instance}}", - ), - [50, 90, 99, 999, "max"], - ) + [ - panels.target( - f"sum by(le, instance)(rate({metric('stream_barrier_sync_storage_duration_seconds_sum')}[$__rate_interval])) / sum by(le, instance)(rate({metric('stream_barrier_sync_storage_duration_seconds_count')}[$__rate_interval]))", - "barrier_sync_latency_avg - {{instance}}", - ), - ], - ), - panels.timeseries_latency( - "Barrier Wait Commit Latency", - "", - quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, 
sum(rate({metric('meta_barrier_wait_commit_duration_seconds_bucket')}[$__rate_interval])) by (le))", - f"barrier_wait_commit_latency_p{legend}", - ), - [50, 90, 99, 999, "max"], - ) + [ - panels.target( - f"rate({metric('meta_barrier_wait_commit_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_wait_commit_duration_seconds_count')}[$__rate_interval])", - "barrier_wait_commit_avg", - ), - ], - ), - panels.timeseries_ops( - "Earliest In-Flight Barrier Progress", - "The number of actors that have processed the earliest in-flight barriers per second. " - "This metric helps users to detect potential congestion or stuck in the system.", - [ - panels.target( - f"rate({metric('stream_barrier_manager_progress')}[$__rate_interval])", - "{{instance}}", - ), - ], - ), - ]) - ] - - -def section_streaming_actors(outer_panels): - panels = outer_panels.sub_panel() - return [ - outer_panels.row_collapsed( - "Streaming Actors", [ - panels.timeseries_actor_rowsps( - "Executor Throughput", - "When enabled, this metric shows the input throughput of each executor.", + panels.timeseries_rowsps( + "Source Throughput(rows/s)", + "The figure shows the number of rows read by each source per second.", [ panels.target( - f"rate({metric('stream_executor_row_count')}[$__rate_interval]) > 0", - "actor {{actor_id}}->{{executor_identity}}", + f"rate({metric('stream_source_output_rows_counts')}[$__rate_interval])", + "source={{source_name}} actor={{actor_id}} @ {{instance}}", ), ], ), - panels.timeseries_percentage( - "Actor Backpressure", - "We first record the total blocking duration(ns) of output buffer of each actor. It shows how " - "much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, " - "on average. Then we divide this duration by 1 second and show it as a percentage.", + panels.timeseries_rowsps( + "Source Throughput(rows/s) Per Partition", + "Each query is executed in parallel with a user-defined parallelism. 
This figure shows the throughput of " + "each parallelism. The throughput of all the parallelism added up is equal to Source Throughput(rows).", [ panels.target( - f"rate({metric('stream_actor_output_buffer_blocking_duration_ns')}[$__rate_interval]) / 1000000000", - "{{actor_id}}", - ), + f"rate({metric('partition_input_count')}[$__rate_interval])", + "actor={{actor_id}} source={{source_id}} partition={{partition}}", + ) ], ), - panels.timeseries_bytes( - "Actor Memory Usage (TaskLocalAlloc)", - "", + panels.timeseries_bytesps( + "Source Throughput(MB/s)", + "The figure shows the number of bytes read by each source per second.", [ panels.target( - f"{metric('actor_memory_usage')}", - "{{actor_id}}", - ), + f"(sum by (source_id)(rate({metric('partition_input_bytes')}[$__rate_interval])))/(1000*1000)", + "source={{source_id}}", + ) ], ), - panels.timeseries_bytes( - "Executor Memory Usage", - "", + panels.timeseries_bytesps( + "Source Throughput(MB/s) Per Partition", + "Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of " + "each parallelism. The throughput of all the parallelism added up is equal to Source Throughput(MB/s).", [ panels.target( - f"{metric('stream_memory_usage')}", - "table {{table_id}} actor {{actor_id}} desc: {{desc}}", - ), + f"(rate({metric('partition_input_bytes')}[$__rate_interval]))/(1000*1000)", + "actor={{actor_id}} source={{source_id}} partition={{partition}}", + ) ], ), - - panels.timeseries_bytes( - "Materialized View Memory Usage", - "Materialzed View Memory Usage", + panels.timeseries_rowsps( + "Source Throughput(rows) per barrier", + "RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of " + "barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two " + "consecutive barriers.", [ panels.target( - f"sum({metric('stream_memory_usage')} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)", - "materialized_view {{materialized_view_id}}", - ), - ], + f"rate({metric('stream_source_rows_per_barrier_counts')}[$__rate_interval])", + "actor={{actor_id}} source={{source_id}} @ {{instance}}" + ) + ] ), - panels.timeseries_percentage( - "Actor Input Blocking Time Ratio", - "", + panels.timeseries_count( + "Source Upstream Status", + "Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is ready.", + [ + panels.target( + f"{metric('source_status_is_up')}", + "source_id={{source_id}}, source_name={{source_name}} @ {{instance}}" + ) + ] + ), + panels.timeseries_ops( + "Source Split Change Events frequency(events/s)", + "Source Split Change Events frequency by source_id and actor_id", + [ + panels.target( + f"rate({metric('stream_source_split_change_event_count')}[$__rate_interval])", + "source={{source_name}} actor={{actor_id}} @ {{instance}}" + ) + ] + ), + panels.timeseries_count( + "Kafka Consumer Lag Size", + "Kafka Consumer Lag Size by source_id, partition and actor_id", [ panels.target( - f"rate({metric('stream_actor_input_buffer_blocking_duration_ns')}[$__rate_interval]) / 1000000000", - "{{actor_id}}->{{upstream_fragment_id}}", + f"{metric('high_watermark')}", + "source={{source_id}} partition={{partition}}" ), - ], + panels.target( + f"{metric('latest_message_id')}", + "source={{source_id}} partition={{partition}} actor_id={{actor_id}}" + ) + ] ), - panels.timeseries_actor_latency( - "Actor Barrier Latency", - "", + panels.timeseries_rowsps( + "Sink Throughput(rows/s)", + "The number of rows streamed into each sink per second.", [ panels.target( - f"rate({metric('stream_actor_barrier_time')}[$__rate_interval]) > 0", - "{{actor_id}}", + 
f"sum(rate({metric('stream_sink_input_row_count')}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group({metric('sink_info')}) by (sink_id, sink_name)", + "sink {{sink_id}} {{sink_name}}", ), ], ), - panels.timeseries_actor_latency( - "Actor Processing Time", - "", + panels.timeseries_rowsps( + "Sink Throughput(rows/s) per Partition", + "The number of rows streamed into each sink per second.", [ panels.target( - f"rate({metric('stream_actor_processing_time')}[$__rate_interval]) > 0", - "{{actor_id}}", + f"sum(rate({metric('stream_sink_input_row_count')}[$__rate_interval])) by (sink_id, actor_id) * on(actor_id) group_left(sink_name) {metric('sink_info')}", + "sink {{sink_id}} {{sink_name}} - actor {{actor_id}}", ), ], ), - panels.timeseries_actor_latency( - "Actor Execution Time", - "", + panels.timeseries_rowsps( + "Materialized View Throughput(rows/s)", + "The figure shows the number of rows written into each materialized view per second.", [ panels.target( - f"rate({metric('stream_actor_actor_execution_time')}[$__rate_interval]) > 0", - "{{actor_id}}", + f"sum(rate({metric('stream_mview_input_row_count')}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group({metric('table_info')}) by (table_id, table_name)", + "mview {{table_id}} {{table_name}}", ), ], ), - panels.timeseries_row( - "Actor Input Row", - "", + panels.timeseries_rowsps( + "Materialized View Throughput(rows/s) per Partition", + "The figure shows the number of rows written into each materialized view per second.", [ panels.target( - f"rate({metric('stream_actor_in_record_cnt')}[$__rate_interval]) > 0", - "{{actor_id}}", + f"sum(rate({metric('stream_mview_input_row_count')}[$__rate_interval])) by (actor_id, table_id) * on(actor_id, table_id) group_left(table_name) {metric('table_info')}", + "mview {{table_id}} {{table_name}} - actor {{actor_id}}", ), ], ), - panels.timeseries_row( - "Actor Output Row", - "", + panels.timeseries_rowsps( + "Backfill Snapshot 
Read Throughput(rows)", + "Total number of rows that have been read from the backfill snapshot", [ panels.target( - f"rate({metric('stream_actor_out_record_cnt')}[$__rate_interval]) > 0", - "{{actor_id}}", + f"rate({table_metric('stream_backfill_snapshot_read_row_count')}[$__rate_interval])", + "table_id={{table_id}} actor={{actor_id}} @ {{instance}}" ), ], ), - panels.timeseries_actor_ops( - "Join Executor Cache", - "", + panels.timeseries_rowsps( + "Backfill Upstream Throughput(rows)", + "Total number of rows that have been output from the backfill upstream", [ panels.target( - f"rate({metric('stream_join_lookup_miss_count')}[$__rate_interval])", - "cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ", + f"rate({table_metric('stream_backfill_upstream_output_row_count')}[$__rate_interval])", + "table_id={{table_id}} actor={{actor_id}} @ {{instance}}" ), + ], + ), + panels.timeseries_count( + "Barrier Number", + "The number of barriers that have been ingested but not completely processed. This metric reflects the " + "current level of congestion within the system.", + [ + panels.target(f"{metric('all_barrier_nums')}", "all_barrier"), panels.target( - f"rate({metric('stream_join_lookup_total_count')}[$__rate_interval])", - "total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}", + f"{metric('in_flight_barrier_nums')}", "in_flight_barrier"), + ], + ), + panels.timeseries_latency( + "Barrier Send Latency", + "The duration between the time point when the scheduled barrier needs to be sent and the time point when " + "the barrier gets actually sent to all the compute nodes. 
Developers can thus detect any internal " + "congestion.", + quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('meta_barrier_send_duration_seconds_bucket')}[$__rate_interval])) by (le))", + f"barrier_send_latency_p{legend}", ), + [50, 90, 99, 999, "max"], + ) + [ panels.target( - f"rate({metric('stream_join_insert_cache_miss_count')}[$__rate_interval])", - "cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}", + f"rate({metric('meta_barrier_send_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_send_duration_seconds_count')}[$__rate_interval])", + "barrier_send_latency_avg", ), ], ), - - panels.timeseries_actor_ops( - "Temporal Join Executor Cache", - "", - [ + panels.timeseries_latency( + "Barrier Latency", + "The time that the data between two consecutive barriers gets fully processed, i.e. the computation " + "results are made durable into materialized views or sink to external systems. 
This metric shows to users " + "the freshness of materialized views.", + quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('meta_barrier_duration_seconds_bucket')}[$__rate_interval])) by (le))", + f"barrier_latency_p{legend}", + ), + [50, 90, 99, 999, "max"], + ) + [ panels.target( - f"rate({metric('stream_temporal_join_cache_miss_count')}[$__rate_interval])", - "temporal join cache miss, table_id {{table_id}} actor {{actor_id}}", + f"rate({metric('meta_barrier_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_duration_seconds_count')}[$__rate_interval])", + "barrier_latency_avg", ), ], ), - panels.timeseries_actor_ops( - "Materialize Executor Cache", + panels.timeseries_latency( + "Barrier In-Flight Latency", "", - [ - panels.target( - f"rate({table_metric('stream_materialize_cache_hit_count')}[$__rate_interval])", - "cache hit count - table {{table_id}} - actor {{actor_id}} {{instance}}", + quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('stream_barrier_inflight_duration_seconds_bucket')}[$__rate_interval])) by (le))", + f"barrier_inflight_latency_p{legend}", ), + [50, 90, 99, 999, "max"], + ) + [ panels.target( - f"rate({table_metric('stream_materialize_cache_total_count')}[$__rate_interval])", - "total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}", + f"max(sum by(le, instance)(rate({metric('stream_barrier_inflight_duration_seconds_sum')}[$__rate_interval])) / sum by(le, instance)(rate({metric('stream_barrier_inflight_duration_seconds_count')}[$__rate_interval])))", + "barrier_inflight_latency_avg", ), ], ), - panels.timeseries_percentage( - "Executor Cache Miss Ratio", + panels.timeseries_latency( + "Barrier Sync Latency", "", - [ + quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('stream_barrier_sync_storage_duration_seconds_bucket')}[$__rate_interval])) 
by (le,instance))", + f"barrier_sync_latency_p{legend}" + " - {{instance}}", + ), + [50, 90, 99, 999, "max"], + ) + [ + panels.target( + f"sum by(le, instance)(rate({metric('stream_barrier_sync_storage_duration_seconds_sum')}[$__rate_interval])) / sum by(le, instance)(rate({metric('stream_barrier_sync_storage_duration_seconds_count')}[$__rate_interval]))", + "barrier_sync_latency_avg - {{instance}}", + ), + ], + ), + panels.timeseries_latency( + "Barrier Wait Commit Latency", + "", + quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('meta_barrier_wait_commit_duration_seconds_bucket')}[$__rate_interval])) by (le))", + f"barrier_wait_commit_latency_p{legend}", + ), + [50, 90, 99, 999, "max"], + ) + [ + panels.target( + f"rate({metric('meta_barrier_wait_commit_duration_seconds_sum')}[$__rate_interval]) / rate({metric('meta_barrier_wait_commit_duration_seconds_count')}[$__rate_interval])", + "barrier_wait_commit_avg", + ), + ], + ), + panels.timeseries_ops( + "Earliest In-Flight Barrier Progress", + "The number of actors that have processed the earliest in-flight barriers per second. " + "This metric helps users to detect potential congestion or stuck in the system.", + [ + panels.target( + f"rate({metric('stream_barrier_manager_progress')}[$__rate_interval])", + "{{instance}}", + ), + ], + ), + ]) + ] + + +def section_streaming_actors(outer_panels): + panels = outer_panels.sub_panel() + return [ + outer_panels.row_collapsed( + "Streaming Actors", + [ + panels.timeseries_actor_rowsps( + "Executor Throughput", + "When enabled, this metric shows the input throughput of each executor.", + [ + panels.target( + f"rate({metric('stream_executor_row_count')}[$__rate_interval])", + "{{executor_identity}} actor {{actor_id}}", + ), + ], + ), + panels.timeseries_percentage( + "Actor Output Blocking Time Ratio (Backpressure)", + "We first record the total blocking duration(ns) of output buffer of each actor. 
It shows how " + "much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, " + "on average. Then we divide this duration by 1 second and show it as a percentage.", + [ + panels.target( + f"avg(rate({metric('stream_actor_output_buffer_blocking_duration_ns')}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000", + "fragment {{fragment_id}}->{{downstream_fragment_id}}", + ), + ], + ), + panels.timeseries_bytes( + "Actor Memory Usage (TaskLocalAlloc)", + "", + [ + panels.target( + f"{metric('actor_memory_usage')}", + "{{actor_id}}", + ), + ], + ), + panels.timeseries_bytes( + "Executor Memory Usage", + "", + [ + panels.target( + f"{metric('stream_memory_usage')}", + "table {{table_id}} actor {{actor_id}} desc: {{desc}}", + ), + ], + ), + + panels.timeseries_bytes( + "Materialized View Memory Usage", + "Materialzed View Memory Usage", + [ + panels.target( + f"sum({metric('stream_memory_usage')} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)", + "materialized_view {{materialized_view_id}}", + ), + ], + ), + panels.timeseries_percentage( + "Actor Input Blocking Time Ratio", + "", + [ + panels.target( + f"avg(rate({metric('stream_actor_input_buffer_blocking_duration_ns')}[$__rate_interval])) by (fragment_id, upstream_fragment_id) / 1000000000", + "fragment {{fragment_id}}<-{{upstream_fragment_id}}", + ), + ], + ), + panels.timeseries_actor_latency( + "Actor Barrier Latency", + "", + [ + panels.target( + f"rate({metric('stream_actor_barrier_time')}[$__rate_interval]) > 0", + "{{actor_id}}", + ), + ], + ), + panels.timeseries_actor_latency( + "Actor Processing Time", + "", + [ + panels.target( + f"rate({metric('stream_actor_processing_time')}[$__rate_interval]) > 0", + "{{actor_id}}", + ), + ], + ), + panels.timeseries_actor_latency( + "Actor Execution Time", + "", + [ + panels.target( + f"rate({metric('stream_actor_actor_execution_time')}[$__rate_interval]) > 0", + 
"{{actor_id}}", + ), + ], + ), + panels.timeseries_row( + "Actor Input Row", + "", + [ + panels.target( + f"rate({metric('stream_actor_in_record_cnt')}[$__rate_interval]) > 0", + "{{actor_id}}", + ), + ], + ), + panels.timeseries_row( + "Actor Output Row", + "", + [ + panels.target( + f"rate({metric('stream_actor_out_record_cnt')}[$__rate_interval]) > 0", + "{{actor_id}}", + ), + ], + ), + panels.timeseries_actor_ops( + "Join Executor Cache", + "", + [ + panels.target( + f"rate({metric('stream_join_lookup_miss_count')}[$__rate_interval])", + "cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ", + ), + panels.target( + f"rate({metric('stream_join_lookup_total_count')}[$__rate_interval])", + "total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_join_insert_cache_miss_count')}[$__rate_interval])", + "cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}", + ), + ], + ), + + panels.timeseries_actor_ops( + "Temporal Join Executor Cache", + "", + [ + panels.target( + f"rate({metric('stream_temporal_join_cache_miss_count')}[$__rate_interval])", + "temporal join cache miss, table_id {{table_id}} actor {{actor_id}}", + ), + ], + ), + panels.timeseries_actor_ops( + "Materialize Executor Cache", + "", + [ + panels.target( + f"rate({table_metric('stream_materialize_cache_hit_count')}[$__rate_interval])", + "cache hit count - table {{table_id}} - actor {{actor_id}} {{instance}}", + ), + panels.target( + f"rate({table_metric('stream_materialize_cache_total_count')}[$__rate_interval])", + "total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}", + ), + ], + ), + panels.timeseries_actor_ops( + "Over Window Executor Cache", + "", + [ + panels.target( + 
f"rate({table_metric('stream_over_window_cache_lookup_count')}[$__rate_interval])", + "cache lookup count - table {{table_id}} - actor {{actor_id}} {{instance}}", + ), + panels.target( + f"rate({table_metric('stream_over_window_cache_miss_count')}[$__rate_interval])", + "cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}", + ), + ] + ), + panels.timeseries_percentage( + "Executor Cache Miss Ratio", + "", + [ panels.target( f"(sum(rate({metric('stream_join_lookup_miss_count')}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate({metric('stream_join_lookup_total_count')}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))", "join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}", ), - panels.target( f"(sum(rate({metric('stream_agg_lookup_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_agg_lookup_total_count')}[$__rate_interval])) by (table_id, actor_id))", "Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_agg_distinct_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_agg_distinct_total_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_group_top_n_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_group_top_n_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_group_top_n_appendonly_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_group_top_n_appendonly_total_query_cache_count')}[$__rate_interval])) by 
(table_id, actor_id))", "Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_lookup_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_lookup_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_temporal_join_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_temporal_join_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"1 - (sum(rate({metric('stream_materialize_cache_hit_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_materialize_cache_total_count')}[$__rate_interval])) by (table_id, actor_id))", "materialize executor cache miss ratio - table {{table_id}} actor {{actor_id}} {{instance}}", ), - panels.target( f"(sum(rate({metric('stream_over_window_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_over_window_cache_lookup_count')}[$__rate_interval])) by (table_id, actor_id))", "Over window cache miss ratio - table {{table_id}} actor {{actor_id}} ", @@ -1133,29 +1158,11 @@ def section_streaming_actors(outer_panels): ], ), panels.timeseries_count( - "Join Cached Entries", + "Join Cached Keys", "Multiple rows with distinct primary keys may have the same join key. This metric counts the " "number of join keys in the executor cache.", [ - panels.target(f"{metric('stream_join_cached_entries')}", - "{{actor_id}} {{side}}"), - ], - ), - panels.timeseries_count( - "Join Cached Rows", - "Multiple rows with distinct primary keys may have the same join key. 
This metric counts the " - "number of rows in the executor cache.", - [ - panels.target(f"{metric('stream_join_cached_rows')}", - "{{actor_id}} {{side}}"), - ], - ), - panels.timeseries_bytes( - "Join Cached Estimated Size", - "Multiple rows with distinct primary keys may have the same join key. This metric counts the " - "size of rows in the executor cache.", - [ - panels.target(f"{metric('stream_join_cached_estimated_size')}", + panels.target(f"{metric('stream_join_cached_entry_count')}", "{{actor_id}} {{side}}"), ], ), @@ -1184,31 +1191,43 @@ def section_streaming_actors(outer_panels): [ panels.target( f"rate({metric('stream_agg_lookup_miss_count')}[$__rate_interval])", - "cache miss - table {{table_id}} actor {{actor_id}}", + "stream agg cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_agg_lookup_total_count')}[$__rate_interval])", + "stream agg total lookups - table {{table_id}} actor {{actor_id}}", ), panels.target( f"rate({metric('stream_agg_distinct_cache_miss_count')}[$__rate_interval])", - "Distinct agg cache miss - table {{table_id}} actor {{actor_id}}", + "distinct agg cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_agg_distinct_total_cache_count')}[$__rate_interval])", + "distinct agg total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_group_top_n_cache_miss_count')}[$__rate_interval])", - "Group top n cache miss - table {{table_id}} actor {{actor_id}}", + "group top n cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_group_top_n_total_query_cache_count')}[$__rate_interval])", + "group top n total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_group_top_n_appendonly_cache_miss_count')}[$__rate_interval])", - "Group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}", + "group top n appendonly 
cache miss - table {{table_id}} actor {{actor_id}}", ), - panels.target( - f"rate({metric('stream_agg_lookup_total_count')}[$__rate_interval])", - "stream agg total lookups - table {{table_id}} actor {{actor_id}}", + f"rate({metric('stream_group_top_n_appendonly_total_query_cache_count')}[$__rate_interval])", + "group top n appendonly total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_lookup_cache_miss_count')}[$__rate_interval])", - "Lookup executor cache miss - table {{table_id}} actor {{actor_id}}", + "lookup executor cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_lookup_total_query_cache_count')}[$__rate_interval])", + "lookup executor total lookups - table {{table_id}} actor {{actor_id}}", ), ], ), @@ -1230,12 +1249,28 @@ def section_streaming_actors(outer_panels): "Aggregation Cached Keys", "The number of keys cached in each hash aggregation executor's executor cache.", [ - panels.target(f"{metric('stream_agg_cached_keys')}", + panels.target(f"{metric('stream_agg_cached_entry_count')}", "stream agg cached keys count | table {{table_id}} actor {{actor_id}}"), - panels.target(f"{metric('stream_agg_distinct_cached_keys')}", + panels.target(f"{metric('stream_agg_distinct_cached_entry_count')}", "stream agg distinct cached keys count |table {{table_id}} actor {{actor_id}}"), ], ), + panels.timeseries_count( + "Aggregation Dirty Groups Count", + "The number of dirty (unflushed) groups in each hash aggregation executor's executor cache.", + [ + panels.target(f"{metric('stream_agg_dirty_groups_count')}", + "stream agg dirty groups count | table {{table_id}} actor {{actor_id}}"), + ], + ), + panels.timeseries_bytes( + "Aggregation Dirty Groups Heap Size", + "The total heap size of dirty (unflushed) groups in each hash aggregation executor's executor cache.", + [ + panels.target(f"{metric('stream_agg_dirty_groups_heap_size')}", + "stream agg dirty groups heap size | 
table {{table_id}} actor {{actor_id}}"), + ], + ), panels.timeseries_count( "TopN Cached Keys", "The number of keys cached in each top_n executor's executor cache.", @@ -1247,7 +1282,7 @@ def section_streaming_actors(outer_panels): ], ), panels.timeseries_count( - "Temporal Join Cache Count", + "Temporal Join Cache Keys", "The number of keys cached in temporal join executor's executor cache.", [ panels.target(f"{metric('stream_temporal_join_cached_entry_count')}", @@ -1255,7 +1290,6 @@ def section_streaming_actors(outer_panels): ], ), - panels.timeseries_count( "Lookup Cached Keys", "The number of keys cached in lookup executor's executor cache.", @@ -1265,24 +1299,13 @@ def section_streaming_actors(outer_panels): ], ), - - panels.timeseries_actor_ops( - "Over Window Executor Cache", - "", + panels.timeseries_count( + "Over Window Cached Keys", + "The number of keys cached in over window executor's executor cache.", [ - panels.target( - f"rate({table_metric('stream_over_window_cached_entry_count')}[$__rate_interval])", - "cached entry count - table {{table_id}} - actor {{actor_id}} {{instance}}", - ), - panels.target( - f"rate({table_metric('stream_over_window_cache_lookup_count')}[$__rate_interval])", - "cache lookup count - table {{table_id}} - actor {{actor_id}} {{instance}}", - ), - panels.target( - f"rate({table_metric('stream_over_window_cache_miss_count')}[$__rate_interval])", - "cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}", - ), - ] + panels.target(f"{metric('stream_over_window_cached_entry_count')}", + "over window cached count | table {{table_id}} actor {{actor_id}}"), + ], ), ], ) @@ -1568,6 +1591,38 @@ def section_batch(outer_panels): ), ], ), + + panels.timeseries_bytes( + "Mem Table Size", + "This metric shows the memory usage of mem_table.", + [ + panels.target( + f"sum({metric('state_store_mem_table_memory_size')}) by (job,instance)", + "mem_table size total - {{job}} @ {{instance}}", + ), + + panels.target( + 
f"{metric('state_store_mem_table_memory_size')}", + "mem_table size - table id {{table_id}} instance id {{instance_id}} {{job}} @ {{instance}}", + ), + ], + ), + + panels.timeseries_count( + "Mem Table Count", + "This metric shows the item counts in mem_table.", + [ + panels.target( + f"sum({metric('state_store_mem_table_item_count')}) by (job,instance)", + "mem_table counts total - {{job}} @ {{instance}}", + ), + + panels.target( + f"{metric('state_store_mem_table_item_count')}", + "mem_table count - table id {{table_id}} instance id {{instance_id}} {{job}} @ {{instance}}", + ), + ], + ), panels.timeseries_latency( "Row SeqScan Next Duration", "", @@ -1706,605 +1761,600 @@ def section_hummock_read(outer_panels): return [ outer_panels.row_collapsed( "Hummock (Read)", - [ - panels.timeseries_ops( - "Cache Ops", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_sst_store_block_request_counts')}[$__rate_interval])) by (job, instance, table_id, type)", - "{{table_id}} @ {{type}} - {{job}} @ {{instance}}", - ), - panels.target( - f"sum(rate({metric('state_store_sst_store_block_request_counts', meta_miss_filter)}[$__rate_interval])) by (job, instance, type)", - "total_meta_miss_count - {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_bytes( - "Cache Size", - "Hummock has three parts of memory usage: 1. Meta Cache 2. 
Block Cache" - "This metric shows the real memory usage of each of these three caches.", - [ - panels.target( - f"avg({metric('state_store_meta_cache_size')}) by (job,instance)", - "meta cache - {{job}} @ {{instance}}", - ), - panels.target( - f"avg({metric('state_store_block_cache_size')}) by (job,instance)", - "data cache - {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_percentage( - "Cache Miss Rate", - "", - [ - panels.target( - f"(sum(rate({table_metric('state_store_sst_store_block_request_counts', meta_miss_filter)}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate({table_metric('state_store_sst_store_block_request_counts', meta_total_filter)}[$__rate_interval])) by (job,instance,table_id))", - "meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}", - ), - panels.target( - f"(sum(rate({table_metric('state_store_sst_store_block_request_counts', data_miss_filter)}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate({table_metric('state_store_sst_store_block_request_counts', data_total_filter)}[$__rate_interval])) by (job,instance,table_id))", - "block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_ops( - "Iter keys flow", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_iter_scan_key_counts')}[$__rate_interval])) by (instance, type, table_id)", - "iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ", - ), - ], - ), - panels.timeseries_count( - "Read Merged SSTs", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_merge_sstable_counts_bucket')}[$__rate_interval])) by (le, job, table_id, type))", - f"# merged ssts p{legend}" + - " - {{table_id}} @ {{job}} @ {{type}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_iter_merge_sstable_counts_sum')}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate({table_metric('state_store_iter_merge_sstable_counts_count')}[$__rate_interval]))", - "# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_latency( - "Read Duration - Get", - "Histogram of the latency of Get operations that have been issued to the state store.", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_get_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"p{legend}" + " - {{table_id}} @ {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_get_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate({table_metric('state_store_get_duration_count')}[$__rate_interval]))", - "avg - {{table_id}} {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_latency( - "Read Duration - Iter", - "Histogram of the time spent on iterator initialization." 
- "Histogram of the time spent on iterator scanning.", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_init_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"create_iter_time p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance)(rate({metric('state_store_iter_init_duration_sum')}[$__rate_interval])) / sum by(le, job,instance) (rate({metric('state_store_iter_init_duration_count')}[$__rate_interval]))", - "create_iter_time avg - {{job}} @ {{instance}}", - ), - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_scan_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"pure_scan_time p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance)(rate({metric('state_store_iter_scan_duration_sum')}[$__rate_interval])) / sum by(le, job,instance) (rate({metric('state_store_iter_scan_duration_count')}[$__rate_interval]))", - "pure_scan_time avg - {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_ops( - "Bloom Filter Ops", - "", - [ - panels.target( - f"sum(irate({table_metric('state_store_read_req_positive_but_non_exist_counts')}[$__rate_interval])) by (table_id,type)", - "bloom filter false positive count - {{table_id}} - {{type}}", - ), - panels.target( - f"sum(irate({table_metric('state_store_read_req_bloom_filter_positive_counts')}[$__rate_interval])) by (table_id,type)", - "bloom filter positive count - {{table_id}} - {{type}}", - ), - panels.target( - f"sum(irate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type)", - "bloom filter check count- {{table_id}} - {{type}}", - ), - ], - ), - - panels.timeseries_percentage( - "Bloom 
Filter Positive Rate", - "Positive / Total", - [ - panels.target( - f"(sum(rate({table_metric('state_store_read_req_bloom_filter_positive_counts')}[$__rate_interval])) by (table_id,type)) / (sum(rate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type))", - "bloom filter positive rate - {{table_id}} - {{type}}", - ), - ], - ), - panels.timeseries_percentage( - "Bloom Filter False-Positive Rate", - "False-Positive / Total", - [ - panels.target( - f"(((sum(rate({table_metric('state_store_read_req_positive_but_non_exist_counts')}[$__rate_interval])) by (table_id,type))) / (sum(rate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type)))", - "read req bloom filter false positive rate - {{table_id}} - {{type}}", - ), - ], - ), - - panels.timeseries_count( - "Slow Fetch Meta Unhits", - "", - [ - panels.target( - f"{metric('state_store_iter_slow_fetch_meta_cache_unhits')}", - "", - ), - ], - ), - - panels.timeseries_ops( - "Read Ops", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_get_duration_count')}[$__rate_interval])) by (job,instanc,table_id)", - "get - {{table_id}} @ {{job}} @ {{instance}}", - ), - panels.target( - f"sum(rate({table_metric('state_store_get_shared_buffer_hit_counts')}[$__rate_interval])) by (job,instance,table_id)", - "shared_buffer hit - {{table_id}} @ {{job}} @ {{instance}}", - ), - panels.target( - f"sum(rate({table_metric('state_store_iter_in_process_counts')}[$__rate_interval])) by(job,instance,table_id)", - "iter - {{table_id}} @ {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_bytes( - "Read Item Size - Get", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_get_key_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile({quantile}, 
sum(rate({table_metric('state_store_get_value_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"p{legend} - {{{{table_id}}}} {{{{job}}}} @ {{{{instance}}}}", - ), - [50, 99, "max"], - ), - ], - ), - panels.timeseries_bytes( - "Read Item Size - Iter", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", - ), - [50, 99, "max"], - ), - ], - ), - - panels.timeseries_bytes( - "Materialized View Read Size", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f'sum(histogram_quantile({quantile}, sum(rate({metric("state_store_iter_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile({quantile}, sum(rate({metric("state_store_get_key_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile({quantile}, sum(rate({metric("state_store_get_value_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id)', - f"read p{legend} - materialized view {{{{materialized_view_id}}}}" - ), - [50, 99, "max"], - ), - ], - ), - - panels.timeseries_count( - "Read Item Count - Iter", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_item_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", - ), - [50, 99, "max"], - ), - ], - ), - panels.timeseries_bytes_per_sec( - "Read Throughput - Get", - "The size 
of a single key-value pair when reading by operation Get." - "Operation Get gets a single key-value pair with respect to a caller-specified key. If the key does not " - "exist in the storage, the size of key is counted into this metric and the size of value is 0.", - [ - panels.target( - f"sum(rate({metric('state_store_get_key_size_sum')}[$__rate_interval])) by(job, instance) + sum(rate({metric('state_store_get_value_size_sum')}[$__rate_interval])) by(job, instance)", - "{{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_bytes_per_sec( - "Read Throughput - Iter", - "The size of all the key-value paris when reading by operation Iter." - "Operation Iter scans a range of key-value pairs.", - [ - panels.target( - f"sum(rate({metric('state_store_iter_size_sum')}[$__rate_interval])) by(job, instance)", - "{{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_latency( - "Fetch Meta Duration", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_fetch_meta_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"fetch_meta_duration p{legend}" + - " - {{table_id}} @ {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance, table_id) (rate({table_metric('state_store_iter_fetch_meta_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate({table_metric('state_store_iter_fetch_meta_duration_count')}[$__rate_interval]))", - "fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}", - ), - ], - ), - - panels.timeseries_count( - "Fetch Meta Unhits", - "", - [ - panels.target( - f"{metric('state_store_iter_fetch_meta_cache_unhits')}", - "", - ), - ], - ), - ]) - ] - -def section_hummock_write(outer_panels): - panels = outer_panels.sub_panel() - return [ - outer_panels.row_collapsed( - "Hummock (Write)", - [ - panels.timeseries_bytes( - "Uploader Memory Size", - "This metric shows the 
real memory usage of uploader.", - [ - panels.target( - f"sum({metric('uploading_memory_size')}) by (job,instance)", - "uploading memory - {{job}} @ {{instance}}", - ), - panels.target( - f"sum({metric('state_store_uploader_uploading_task_size')}) by (job,instance)", - "uploading task size - {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_latency( - "Build and Sync Sstable Duration", - "Histogram of time spent on compacting shared buffer to remote storage.", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('state_store_sync_duration_bucket')}[$__rate_interval])) by (le, job, instance))", - f"p{legend}" + " - {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance) (rate({metric('state_store_sync_duration_sum')}[$__rate_interval])) / sum by(le, job, instance) (rate({metric('state_store_sync_duration_count')}[$__rate_interval]))", - "avg - {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_bytes( - "Materialized View Write Size", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f'sum(histogram_quantile({quantile}, sum(rate({metric("state_store_write_batch_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)', - f"write p{legend} - materialized view {{{{materialized_view_id}}}}" - ), - [50, 99, "max"], - ), - ], - ), - panels.timeseries_count( - "Uploader - Tasks Count", - "", - [ - panels.target( - f"sum(irate({table_metric('state_store_merge_imm_task_counts')}[$__rate_interval])) by (job,instance,table_id)", - "merge imm tasks - {{table_id}} @ {{instance}} ", - ), - panels.target( - f"sum(irate({metric('state_store_spill_task_counts')}[$__rate_interval])) by (job,instance,uploader_stage)", - "Uploader spill tasks - {{uploader_stage}} @ {{instance}} ", - ), 
- ], - ), - panels.timeseries_bytes( - "Uploader - Task Size", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_merge_imm_memory_sz')}[$__rate_interval])) by (job,instance,table_id)", - "Merging tasks memory size - {{table_id}} @ {{instance}} ", - ), - panels.target( - f"sum(rate({metric('state_store_spill_task_size')}[$__rate_interval])) by (job,instance,uploader_stage)", - "Uploading tasks size - {{uploader_stage}} @ {{instance}} ", - ), - ], - ), - - panels.timeseries_ops( - "Write Ops", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_write_batch_duration_count')}[$__rate_interval])) by (job,instance,table_id)", - "write batch - {{table_id}} @ {{job}} @ {{instance}} ", - ), - panels.target( - f"sum(rate({metric('state_store_sync_duration_count')}[$__rate_interval])) by (job,instance)", - "l0 - {{job}} @ {{instance}} ", - ), - ], - ), - panels.timeseries_latency( - "Write Duration", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_write_batch_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", - f"write to shared_buffer p{legend}" + - " - {{table_id}} @ {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_write_batch_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate({table_metric('state_store_write_batch_duration_count')}[$__rate_interval]))", - "write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}", - ), - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('state_store_write_shared_buffer_sync_time_bucket')}[$__rate_interval])) by (le, job, instance))", - f"write to object_store p{legend}" + - " - {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, 
instance)(rate({metric('state_store_write_shared_buffer_sync_time_sum')}[$__rate_interval])) / sum by(le, job, instance)(rate({metric('state_store_write_shared_buffer_sync_time_count')}[$__rate_interval]))", - "write to object_store - {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_ops( - "Write Item Count", - "", - [ - panels.target( - f"sum(irate({table_metric('state_store_write_batch_tuple_counts')}[$__rate_interval])) by (job,instance,table_id)", - "write_batch_kv_pair_count - {{table_id}} @ {{instance}} ", - ), - ], - ), - panels.timeseries_bytes_per_sec( - "Write Throughput", - "", - [ - panels.target( - f"sum(rate({table_metric('state_store_write_batch_size_sum')}[$__rate_interval]))by(job,instance,table_id) / sum(rate({table_metric('state_store_write_batch_size_count')}[$__rate_interval]))by(job,instance,table_id)", - "shared_buffer - {{table_id}} @ {{job}} @ {{instance}}", - ), - panels.target( - f"sum(rate({metric('compactor_shared_buffer_to_sstable_size_sum')}[$__rate_interval]))by(job,instance) / sum(rate({metric('compactor_shared_buffer_to_sstable_size_count')}[$__rate_interval]))by(job,instance)", - "sync - {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_bytes( - "Mem Table Size (Max)", - "This metric shows the statistics of mem_table size on flush. 
By default only max (p100) is shown.", - [ - panels.target( - f"histogram_quantile(1.0, sum(rate({metric('state_store_write_batch_size_bucket')}[$__rate_interval])) by (le, table_id, job, instance))", - "pmax - {{table_id}} @ {{job}} @ {{instance}}", - ), - ], - ), - panels.timeseries_bytes( - "Checkpoint Sync Size", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('state_store_sync_size_bucket')}[$__rate_interval])) by (le, job, instance))", - f"p{legend}" + " - {{job}} @ {{instance}}", - ), - [50, 99, "max"], - ), - panels.target( - f"sum by(le, job, instance) (rate({metric('state_store_sync_size_sum')}[$__rate_interval])) / sum by(le, job, instance) (rate({metric('state_store_sync_size_count')}[$__rate_interval]))", - "avg - {{job}} @ {{instance}}", - ), - ], - ), - ]) - ] - - -def section_hummock_tiered_cache(outer_panels): - panels = outer_panels.sub_panel() - file_cache_hit_filter = 'op="lookup",extra="hit"' - file_cache_miss_filter = 'op="lookup",extra="miss"' - return [ - outer_panels.row_collapsed( - "Hummock Tiered Cache", [ panels.timeseries_ops( - "Ops", + "Cache Ops", "", [ panels.target( - f"sum(rate({metric('foyer_storage_op_duration_count')}[$__rate_interval])) by (foyer, op, extra, instance)", - "{{foyer}} file cache {{op}} {{extra}} @ {{instance}}", + f"sum(rate({table_metric('state_store_sst_store_block_request_counts')}[$__rate_interval])) by (job, instance, table_id, type)", + "{{table_id}} @ {{type}} - {{job}} @ {{instance}}", ), - ], - ), - panels.timeseries_latency( - "Duration", - "", - [ - *quantile( - lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('foyer_storage_op_duration_bucket')}[$__rate_interval])) by (le, foyer, op, extra, instance))", - f"p{legend}" + - " - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}", - ), - [50, 99, "max"], + panels.target( + f"sum(rate({metric('state_store_sst_store_block_request_counts', 
meta_miss_filter)}[$__rate_interval])) by (job, instance, type)", + "total_meta_miss_count - {{job}} @ {{instance}}", ), ], ), - panels.timeseries_bytes_per_sec( - "Throughput", - "", + + panels.timeseries_bytes( + "Cache Size", + "Hummock has three parts of memory usage: 1. Meta Cache 2. Block Cache" + "This metric shows the real memory usage of each of these three caches.", [ panels.target( - f"sum(rate({metric('foyer_storage_op_bytes')}[$__rate_interval])) by (foyer, op, extra, instance)", - "{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}", + f"avg({metric('state_store_meta_cache_size')}) by (job,instance)", + "meta cache - {{job}} @ {{instance}}", + ), + panels.target( + f"avg({metric('state_store_block_cache_size')}) by (job,instance)", + "data cache - {{job}} @ {{instance}}", ), ], ), - panels.timeseries_bytes( - "Size", + + panels.timeseries_percentage( + "Cache Miss Rate", "", [ panels.target( - f"sum({metric('foyer_storage_total_bytes')}) by (foyer, instance)", "{{foyer}} size @ {{instance}}" + f"(sum(rate({table_metric('state_store_sst_store_block_request_counts', meta_miss_filter)}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate({table_metric('state_store_sst_store_block_request_counts', meta_total_filter)}[$__rate_interval])) by (job,instance,table_id))", + "meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}", + ), + panels.target( + f"(sum(rate({table_metric('state_store_sst_store_block_request_counts', data_miss_filter)}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate({table_metric('state_store_sst_store_block_request_counts', data_total_filter)}[$__rate_interval])) by (job,instance,table_id))", + "block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}", ), ], ), - panels.timeseries_percentage( - "Cache Hit Ratio", + + panels.timeseries_ops( + "Iter keys flow", "", [ panels.target( - f"sum(rate({metric('foyer_storage_op_duration_count', file_cache_hit_filter)}[$__rate_interval])) by (foyer, instance) / 
(sum(rate({metric('foyer_storage_op_duration_count', file_cache_hit_filter)}[$__rate_interval])) by (foyer, instance) + sum(rate({metric('foyer_storage_op_duration_count', file_cache_miss_filter)}[$__rate_interval])) by (foyer, instance))", - "{{foyer}} file cache hit ratio @ {{instance}}", + f"sum(rate({table_metric('state_store_iter_scan_key_counts')}[$__rate_interval])) by (instance, type, table_id)", + "iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ", ), ], ), panels.timeseries_count( - "Refill Queue Length", + "Read Merged SSTs", "", [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_merge_sstable_counts_bucket')}[$__rate_interval])) by (le, job, table_id, type))", + f"# merged ssts p{legend}" + + " - {{table_id}} @ {{job}} @ {{type}}", + ), + [50, 99, "max"], + ), panels.target( - f"sum(refill_queue_total) by (instance)", - "refill queue length @ {{instance}}", + f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_iter_merge_sstable_counts_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate({table_metric('state_store_iter_merge_sstable_counts_count')}[$__rate_interval]))", + "# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}", ), ], ), - panels.timeseries_ops( - "Refill Ops", - "", + + panels.timeseries_latency( + "Read Duration - Get", + "Histogram of the latency of Get operations that have been issued to the state store.", [ - panels.target( - f"sum(rate({metric('data_refill_duration_count')}[$__rate_interval])) by (op, instance)", - "data file cache refill - {{op}} @ {{instance}}", - ), - panels.target( - f"sum(rate({metric('data_refill_filtered_total')}[$__rate_interval])) by (instance)", - "data file cache refill - filtered @ {{instance}}", + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_get_duration_bucket')}[$__rate_interval])) by (le, job, 
instance, table_id))", + f"p{legend}" + " - {{table_id}} @ {{job}} @ {{instance}}", + ), + [50, 99, "max"], ), panels.target( - f"sum(rate({metric('meta_refill_duration_count')}[$__rate_interval])) by (op, instance)", - "meta file cache refill - {{op}} @ {{instance}}", + f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_get_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate({table_metric('state_store_get_duration_count')}[$__rate_interval]))", + "avg - {{table_id}} {{job}} @ {{instance}}", ), ], ), panels.timeseries_latency( - "Refill Latency", - "", + "Read Duration - Iter", + "Histogram of the time spent on iterator initialization." + "Histogram of the time spent on iterator scanning.", [ *quantile( lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('data_refill_duration_bucket')}[$__rate_interval])) by (le, op, instance))", - f"p{legend} - " + - "data file cache refill - {{op}} @ {{instance}}", + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_init_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"create_iter_time p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", ), [50, 99, "max"], ), + panels.target( + f"sum by(le, job, instance)(rate({metric('state_store_iter_init_duration_sum')}[$__rate_interval])) / sum by(le, job,instance) (rate({metric('state_store_iter_init_duration_count')}[$__rate_interval]))", + "create_iter_time avg - {{job}} @ {{instance}}", + ), *quantile( lambda quantile, legend: panels.target( - f"histogram_quantile({quantile}, sum(rate({metric('meta_refill_duration_bucket')}[$__rate_interval])) by (le, instance))", - f"p{legend} - " + - "meta cache refill @ {{instance}}", + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_scan_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"pure_scan_time p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ 
{{{{instance}}}}", ), [50, 99, "max"], ), + panels.target( + f"sum by(le, job, instance)(rate({metric('state_store_iter_scan_duration_sum')}[$__rate_interval])) / sum by(le, job,instance) (rate({metric('state_store_iter_scan_duration_count')}[$__rate_interval]))", + "pure_scan_time avg - {{job}} @ {{instance}}", + ), ], ), - ], - ) - ] - -def section_hummock_manager(outer_panels): - panels = outer_panels.sub_panel() - total_key_size_filter = "metric='total_key_size'" + panels.timeseries_ops( + "Bloom Filter Ops", + "", + [ + panels.target( + f"sum(irate({table_metric('state_store_read_req_positive_but_non_exist_counts')}[$__rate_interval])) by (table_id,type)", + "bloom filter false positive count - {{table_id}} - {{type}}", + ), + panels.target( + f"sum(irate({table_metric('state_store_read_req_bloom_filter_positive_counts')}[$__rate_interval])) by (table_id,type)", + "bloom filter positive count - {{table_id}} - {{type}}", + ), + panels.target( + f"sum(irate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type)", + "bloom filter check count- {{table_id}} - {{type}}", + ), + ], + ), + + panels.timeseries_percentage( + "Bloom Filter Positive Rate", + "Positive / Total", + [ + panels.target( + f"(sum(rate({table_metric('state_store_read_req_bloom_filter_positive_counts')}[$__rate_interval])) by (table_id,type)) / (sum(rate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type))", + "bloom filter positive rate - {{table_id}} - {{type}}", + ), + ], + ), + panels.timeseries_percentage( + "Bloom Filter False-Positive Rate", + "False-Positive / Total", + [ + panels.target( + f"(((sum(rate({table_metric('state_store_read_req_positive_but_non_exist_counts')}[$__rate_interval])) by (table_id,type))) / (sum(rate({table_metric('state_store_read_req_check_bloom_filter_counts')}[$__rate_interval])) by (table_id,type)))", + "read req bloom filter false positive rate - {{table_id}} 
- {{type}}", + ), + ], + ), + + panels.timeseries_count( + "Slow Fetch Meta Unhits", + "", + [ + panels.target( + f"{metric('state_store_iter_slow_fetch_meta_cache_unhits')}", + "", + ), + ], + ), + + panels.timeseries_ops( + "Read Ops", + "", + [ + panels.target( + f"sum(rate({table_metric('state_store_get_duration_count')}[$__rate_interval])) by (job,instanc,table_id)", + "get - {{table_id}} @ {{job}} @ {{instance}}", + ), + panels.target( + f"sum(rate({table_metric('state_store_get_shared_buffer_hit_counts')}[$__rate_interval])) by (job,instance,table_id)", + "shared_buffer hit - {{table_id}} @ {{job}} @ {{instance}}", + ), + panels.target( + f"sum(rate({table_metric('state_store_iter_in_process_counts')}[$__rate_interval])) by(job,instance,table_id)", + "iter - {{table_id}} @ {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes( + "Read Item Size - Get", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_get_key_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile({quantile}, sum(rate({table_metric('state_store_get_value_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"p{legend} - {{{{table_id}}}} {{{{job}}}} @ {{{{instance}}}}", + ), + [50, 99, "max"], + ), + ], + ), + panels.timeseries_bytes( + "Read Item Size - Iter", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_size_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", + ), + [50, 99, "max"], + ), + ], + ), + + panels.timeseries_bytes( + "Materialized View Read Size", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f'sum(histogram_quantile({quantile}, sum(rate({metric("state_store_iter_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) * 
on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile({quantile}, sum(rate({metric("state_store_get_key_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile({quantile}, sum(rate({metric("state_store_get_value_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id)', + f"read p{legend} - materialized view {{{{materialized_view_id}}}}" + ), + [50, 99, "max"], + ), + ], + ), + + panels.timeseries_count( + "Read Item Count - Iter", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_item_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"p{legend} - {{{{table_id}}}} @ {{{{job}}}} @ {{{{instance}}}}", + ), + [50, 99, "max"], + ), + ], + ), + panels.timeseries_bytes_per_sec( + "Read Throughput - Get", + "The size of a single key-value pair when reading by operation Get." + "Operation Get gets a single key-value pair with respect to a caller-specified key. If the key does not " + "exist in the storage, the size of key is counted into this metric and the size of value is 0.", + [ + panels.target( + f"sum(rate({metric('state_store_get_key_size_sum')}[$__rate_interval])) by(job, instance) + sum(rate({metric('state_store_get_value_size_sum')}[$__rate_interval])) by(job, instance)", + "{{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes_per_sec( + "Read Throughput - Iter", + "The size of all the key-value paris when reading by operation Iter." 
+ "Operation Iter scans a range of key-value pairs.", + [ + panels.target( + f"sum(rate({metric('state_store_iter_size_sum')}[$__rate_interval])) by(job, instance)", + "{{job}} @ {{instance}}", + ), + ], + ), + + panels.timeseries_latency( + "Fetch Meta Duration", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_iter_fetch_meta_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"fetch_meta_duration p{legend}" + + " - {{table_id}} @ {{job}} @ {{instance}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, job, instance, table_id) (rate({table_metric('state_store_iter_fetch_meta_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate({table_metric('state_store_iter_fetch_meta_duration_count')}[$__rate_interval]))", + "fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}", + ), + ], + ), + + panels.timeseries_count( + "Fetch Meta Unhits", + "", + [ + panels.target( + f"{metric('state_store_iter_fetch_meta_cache_unhits')}", + "", + ), + ], + ), + ]) + ] + + +def section_hummock_write(outer_panels): + panels = outer_panels.sub_panel() + return [ + outer_panels.row_collapsed( + "Hummock (Write)", + [ + panels.timeseries_bytes( + "Uploader Memory Size", + "This metric shows the real memory usage of uploader.", + [ + panels.target( + f"sum({metric('uploading_memory_size')}) by (job,instance)", + "uploading memory - {{job}} @ {{instance}}", + ), + panels.target( + f"sum({metric('state_store_uploader_uploading_task_size')}) by (job,instance)", + "uploading task size - {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_latency( + "Build and Sync Sstable Duration", + "Histogram of time spent on compacting shared buffer to remote storage.", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('state_store_sync_duration_bucket')}[$__rate_interval])) by 
(le, job, instance))", + f"p{legend}" + " - {{job}} @ {{instance}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, job, instance) (rate({metric('state_store_sync_duration_sum')}[$__rate_interval])) / sum by(le, job, instance) (rate({metric('state_store_sync_duration_count')}[$__rate_interval]))", + "avg - {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes( + "Materialized View Write Size", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f'sum(histogram_quantile({quantile}, sum(rate({metric("state_store_write_batch_size_bucket")}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group({metric("table_info")}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)', + f"write p{legend} - materialized view {{{{materialized_view_id}}}}" + ), + [50, 99, "max"], + ), + ], + ), + panels.timeseries_count( + "Uploader - Tasks Count", + "", + [ + panels.target( + f"sum(irate({table_metric('state_store_merge_imm_task_counts')}[$__rate_interval])) by (job,instance,table_id)", + "merge imm tasks - {{table_id}} @ {{instance}} ", + ), + panels.target( + f"sum(irate({metric('state_store_spill_task_counts')}[$__rate_interval])) by (job,instance,uploader_stage)", + "Uploader spill tasks - {{uploader_stage}} @ {{instance}} ", + ), + ], + ), + panels.timeseries_bytes( + "Uploader - Task Size", + "", + [ + panels.target( + f"sum(rate({table_metric('state_store_merge_imm_memory_sz')}[$__rate_interval])) by (job,instance,table_id)", + "Merging tasks memory size - {{table_id}} @ {{instance}} ", + ), + panels.target( + f"sum(rate({metric('state_store_spill_task_size')}[$__rate_interval])) by (job,instance,uploader_stage)", + "Uploading tasks size - {{uploader_stage}} @ {{instance}} ", + ), + ], + ), + + panels.timeseries_ops( + "Write Ops", + "", + [ + panels.target( + f"sum(rate({table_metric('state_store_write_batch_duration_count')}[$__rate_interval])) by 
(job,instance,table_id)", + "write batch - {{table_id}} @ {{job}} @ {{instance}} ", + ), + panels.target( + f"sum(rate({metric('state_store_sync_duration_count')}[$__rate_interval])) by (job,instance)", + "l0 - {{job}} @ {{instance}} ", + ), + ], + ), + panels.timeseries_latency( + "Write Duration", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({table_metric('state_store_write_batch_duration_bucket')}[$__rate_interval])) by (le, job, instance, table_id))", + f"write to shared_buffer p{legend}" + + " - {{table_id}} @ {{job}} @ {{instance}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, job, instance, table_id)(rate({table_metric('state_store_write_batch_duration_sum')}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate({table_metric('state_store_write_batch_duration_count')}[$__rate_interval]))", + "write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}", + ), + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('state_store_write_shared_buffer_sync_time_bucket')}[$__rate_interval])) by (le, job, instance))", + f"write to object_store p{legend}" + + " - {{job}} @ {{instance}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, job, instance)(rate({metric('state_store_write_shared_buffer_sync_time_sum')}[$__rate_interval])) / sum by(le, job, instance)(rate({metric('state_store_write_shared_buffer_sync_time_count')}[$__rate_interval]))", + "write to object_store - {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_ops( + "Write Item Count", + "", + [ + panels.target( + f"sum(irate({table_metric('state_store_write_batch_tuple_counts')}[$__rate_interval])) by (job,instance,table_id)", + "write_batch_kv_pair_count - {{table_id}} @ {{instance}} ", + ), + ], + ), + panels.timeseries_bytes_per_sec( + "Write Throughput", + "", + [ + panels.target( + 
f"sum(rate({table_metric('state_store_write_batch_size_sum')}[$__rate_interval]))by(job,instance,table_id) / sum(rate({table_metric('state_store_write_batch_size_count')}[$__rate_interval]))by(job,instance,table_id)", + "shared_buffer - {{table_id}} @ {{job}} @ {{instance}}", + ), + panels.target( + f"sum(rate({metric('compactor_shared_buffer_to_sstable_size_sum')}[$__rate_interval]))by(job,instance) / sum(rate({metric('compactor_shared_buffer_to_sstable_size_count')}[$__rate_interval]))by(job,instance)", + "sync - {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes( + "Write Batch Size", + "This metric shows the statistics of mem_table size on flush. By default only max (p100) is shown.", + [ + panels.target( + f"histogram_quantile(1.0, sum(rate({metric('state_store_write_batch_size_bucket')}[$__rate_interval])) by (le, table_id, job, instance))", + "pmax - {{table_id}} @ {{job}} @ {{instance}}", + ), + + panels.target( + f"sum by(le, job, instance) (rate({metric('state_store_write_batch_size_sum')}[$__rate_interval])) / sum by(le, table_id, job, instance) (rate({metric('state_store_write_batch_size_count')}[$__rate_interval]))", + "avg - {{table_id}} {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes( + "Checkpoint Sync Size", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('state_store_sync_size_bucket')}[$__rate_interval])) by (le, job, instance))", + f"p{legend}" + " - {{job}} @ {{instance}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, job, instance) (rate({metric('state_store_sync_size_sum')}[$__rate_interval])) / sum by(le, job, instance) (rate({metric('state_store_sync_size_count')}[$__rate_interval]))", + "avg - {{job}} @ {{instance}}", + ), + ], + ), + ]) + ] + + +def section_hummock_tiered_cache(outer_panels): + panels = outer_panels.sub_panel() + file_cache_hit_filter = 'op="lookup",extra="hit"' + file_cache_miss_filter = 
'op="lookup",extra="miss"' + return [ + outer_panels.row_collapsed( + "Hummock Tiered Cache", + [ + panels.timeseries_ops( + "Ops", + "", + [ + panels.target( + f"sum(rate({metric('foyer_storage_op_duration_count')}[$__rate_interval])) by (foyer, op, extra, instance)", + "{{foyer}} file cache {{op}} {{extra}} @ {{instance}}", + ), + ], + ), + panels.timeseries_latency( + "Duration", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('foyer_storage_op_duration_bucket')}[$__rate_interval])) by (le, foyer, op, extra, instance))", + f"p{legend}" + + " - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}", + ), + [50, 99, "max"], + ), + ], + ), + panels.timeseries_bytes_per_sec( + "Throughput", + "", + [ + panels.target( + f"sum(rate({metric('foyer_storage_op_bytes')}[$__rate_interval])) by (foyer, op, extra, instance)", + "{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}", + ), + ], + ), + panels.timeseries_bytes( + "Size", + "", + [ + panels.target( + f"sum({metric('foyer_storage_total_bytes')}) by (foyer, instance)", + "{{foyer}} size @ {{instance}}" + ), + ], + ), + panels.timeseries_percentage( + "Cache Hit Ratio", + "", + [ + panels.target( + f"sum(rate({metric('foyer_storage_op_duration_count', file_cache_hit_filter)}[$__rate_interval])) by (foyer, instance) / (sum(rate({metric('foyer_storage_op_duration_count', file_cache_hit_filter)}[$__rate_interval])) by (foyer, instance) + sum(rate({metric('foyer_storage_op_duration_count', file_cache_miss_filter)}[$__rate_interval])) by (foyer, instance))", + "{{foyer}} file cache hit ratio @ {{instance}}", + ), + ], + ), + panels.timeseries_count( + "Refill Queue Length", + "", + [ + panels.target( + f"sum(refill_queue_total) by (instance)", + "refill queue length @ {{instance}}", + ), + ], + ), + panels.timeseries_ops( + "Refill Ops", + "", + [ + panels.target( + f"sum(rate({metric('refill_duration_count')}[$__rate_interval])) by (type, op, 
instance)", + "{{type}} file cache refill - {{op}} @ {{instance}}", + ), + panels.target( + f"sum(rate({metric('refill_total')}[$__rate_interval])) by (type, op, instance)", + "{{type}} file cache refill - {{op}} @ {{instance}}", + ), + ], + ), + panels.timeseries_latency( + "Refill Latency", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('refill_duration_bucket')}[$__rate_interval])) by (le, type, op, instance))", + f"p{legend} - " + + "{{type}} file cache refill - {{op}} @ {{instance}}", + ), + [50, 99, "max"], + ), + ], + ), + ], + ) + ] + + +def section_hummock_manager(outer_panels): + panels = outer_panels.sub_panel() + total_key_size_filter = "metric='total_key_size'" total_value_size_filter = "metric='total_value_size'" total_key_count_filter = "metric='total_key_count'" mv_total_size_filter = "metric='materialized_view_total_size'" @@ -2394,7 +2444,6 @@ def section_hummock_manager(outer_panels): ], ), - panels.timeseries_count( "Table KV Count", "", @@ -2796,300 +2845,273 @@ def section_grpc_hummock_meta_client(outer_panels): "get_new_sst_ids_latency_latency_p99 - {{instance}} ", ), panels.target( - f"sum(irate({metric('state_store_get_new_sst_ids_latency_sum')}[$__rate_interval])) / sum(irate({metric('state_store_get_new_sst_ids_latency_count')}[$__rate_interval]))", - "get_new_sst_ids_latency_latency_avg", + f"sum(irate({metric('state_store_get_new_sst_ids_latency_sum')}[$__rate_interval])) / sum(irate({metric('state_store_get_new_sst_ids_latency_count')}[$__rate_interval]))", + "get_new_sst_ids_latency_latency_avg", + ), + panels.target( + f"histogram_quantile(0.90, sum(irate({metric('state_store_get_new_sst_ids_latency_bucket')}[$__rate_interval])) by (le, job, instance))", + "get_new_sst_ids_latency_latency_p90 - {{instance}} ", + ), + ], + ), + panels.timeseries_count( + "table_count", + "", + [ + panels.target( + 
f"sum(irate({metric('state_store_get_new_sst_ids_latency_counts')}[$__rate_interval]))by(job,instance)", + "get_new_sst_ids_latency_counts - {{instance}} ", + ), + ], + ), + panels.timeseries_latency( + "compaction_latency", + "", + [ + panels.target( + f"histogram_quantile(0.5, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", + "report_compaction_task_latency_p50 - {{instance}}", + ), + panels.target( + f"histogram_quantile(0.99, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", + "report_compaction_task_latency_p99 - {{instance}}", + ), + panels.target( + f"sum(irate({metric('state_store_report_compaction_task_latency_sum')}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))", + "report_compaction_task_latency_avg", + ), + panels.target( + f"histogram_quantile(0.90, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", + "report_compaction_task_latency_p90 - {{instance}}", + ), + ], + ), + ], + ), + ] + + +def section_kafka_native_metrics(outer_panels): + panels = outer_panels.sub_panel() + return [ + outer_panels.row_collapsed( + "Kafka Native Metrics", + [ + panels.timeseries_count( + "Message Count in Producer Queue", + "Current number of messages in producer queues", + [ + panels.target( + f"{metric('rdkafka_top_msg_cnt')}", + "id {{ id }}, client_id {{ client_id }}" + ), + ] + ), + panels.timeseries_bytes( + "Message Size in Producer Queue", + "Current total size of messages in producer queues", + [ + panels.target( + f"{metric('rdkafka_top_msg_size')}", + "id {{ id }}, client_id {{ client_id }}" + ), + ] + ), + panels.timeseries_count( + "Message Produced Count", + "Total number of messages transmitted (produced) to Kafka brokers", + [ + panels.target( + f"{metric('rdkafka_top_tx_msgs')}", + "id {{ id }}, 
client_id {{ client_id }}" + ) + ] + ), + panels.timeseries_count( + "Message Received Count", + "Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.", + [ + panels.target( + f"{metric('rdkafka_top_rx_msgs')}", + "id {{ id }}, client_id {{ client_id }}" + ) + ] + ), + + panels.timeseries_count( + "Message Count Pending to Transmit (per broker)", + "Number of messages awaiting transmission to broker", + [ + panels.target( + f"{metric('rdkafka_broker_outbuf_msg_cnt')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" + ), + ] + ), + panels.timeseries_count( + "Inflight Message Count (per broker)", + "Number of messages in-flight to broker awaiting response", + [ + panels.target( + f"{metric('rdkafka_broker_waitresp_msg_cnt')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" + ) + ] + ), + panels.timeseries_count( + "Error Count When Transmitting (per broker)", + "Total number of transmission errors", + [ + panels.target( + f"{metric('rdkafka_broker_tx_errs')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" + ) + ] + ), + panels.timeseries_count( + "Error Count When Receiving (per broker)", + "Total number of receive errors", + [ + panels.target( + f"{metric('rdkafka_broker_rx_errs')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" + ) + ] + ), + panels.timeseries_count( + "Timeout Request Count (per broker)", + "Total number of requests timed out", + [ + panels.target( + f"{metric('rdkafka_broker_req_timeouts')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" + ) + ] + ), + panels.timeseries_latency_ms( + "RTT (per broker)", + "Broker latency / round-trip time in milli seconds", + [ + panels.target( + f"{metric('rdkafka_broker_rtt_avg')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", + ), + panels.target( + 
f"{metric('rdkafka_broker_rtt_p75')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", + ), + panels.target( + f"{metric('rdkafka_broker_rtt_p90')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", + ), + panels.target( + f"{metric('rdkafka_broker_rtt_p99')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", + ), + panels.target( + f"{metric('rdkafka_broker_rtt_p99_99')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), panels.target( - f"histogram_quantile(0.90, sum(irate({metric('state_store_get_new_sst_ids_latency_bucket')}[$__rate_interval])) by (le, job, instance))", - "get_new_sst_ids_latency_latency_p90 - {{instance}} ", + f"{metric('rdkafka_broker_rtt_out_of_range')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), - ], + ] ), - panels.timeseries_count( - "table_count", - "", + panels.timeseries_latency_ms( + "Throttle Time (per broker)", + "Broker throttling time in milliseconds", [ panels.target( - f"sum(irate({metric('state_store_get_new_sst_ids_latency_counts')}[$__rate_interval]))by(job,instance)", - "get_new_sst_ids_latency_counts - {{instance}} ", + f"{metric('rdkafka_broker_throttle_avg')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), - ], - ), - panels.timeseries_latency( - "compaction_latency", - "", - [ panels.target( - f"histogram_quantile(0.5, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", - "report_compaction_task_latency_p50 - {{instance}}", + f"{metric('rdkafka_broker_throttle_p75')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), panels.target( - f"histogram_quantile(0.99, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", - "report_compaction_task_latency_p99 - {{instance}}", + f"{metric('rdkafka_broker_throttle_p90')}/1000", + "id {{ id }}, 
client_id {{ client_id}}, broker {{ broker }}", ), panels.target( - f"sum(irate({metric('state_store_report_compaction_task_latency_sum')}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))", - "report_compaction_task_latency_avg", + f"{metric('rdkafka_broker_throttle_p99')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), panels.target( - f"histogram_quantile(0.90, sum(irate({metric('state_store_report_compaction_task_latency_bucket')}[$__rate_interval])) by (le, job, instance))", - "report_compaction_task_latency_p90 - {{instance}}", + f"{metric('rdkafka_broker_throttle_p99_99')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", ), - ], + panels.target( + f"{metric('rdkafka_broker_throttle_out_of_range')}/1000", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", + ), + ] ), - ], - ), - ] - -def section_kafka_native_metrics(outer_panels): - panels = outer_panels.sub_panel() - cluster_panels = panels.sub_panel() - broker_panels = panels.sub_panel() - topic_panels = panels.sub_panel() - partition_panels = panels.sub_panel() - return [ - outer_panels.row_collapsed( - "Kafka Native Metrics", - [ - panels.row_collapsed( - "Cluster Level Metrics", + panels.timeseries_latency_ms( + "Topic Metadata_age Age", + "Age of metadata from broker for this topic (milliseconds)", [ - cluster_panels.timeseries_latency_ms( - "Client Age", - "Time since this client instance was created (milli seconds)", - [ - cluster_panels.target( - f"{metric('rdkafka_top_age')}/1000", - "id {{ id }}, client_id {{ client_id }}" - ), - ], - ), - cluster_panels.timeseries_count( - "Message Count in Producer Queue", - "Current number of messages in producer queues", - [ - cluster_panels.target( - f"{metric('rdkafka_top_msg_cnt')}", - "id {{ id }}, client_id {{ client_id }}" - ), - ] - ), - cluster_panels.timeseries_bytes( - "Message Size in Producer Queue", - "Current total size of messages in 
producer queues", - [ - cluster_panels.target( - f"{metric('rdkafka_top_msg_size')}", - "id {{ id }}, client_id {{ client_id }}" - ), - ] - ), - cluster_panels.timeseries_count( - "Message Produced Count", - "Total number of messages transmitted (produced) to Kafka brokers", - [ - cluster_panels.target( - f"{metric('rdkafka_top_tx_msgs')}", - "id {{ id }}, client_id {{ client_id }}" - ) - ] - ), - cluster_panels.timeseries_count( - "Message Received Count", - "Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.", - [ - cluster_panels.target( - f"{metric('rdkafka_top_rx_msgs')}", - "id {{ id }}, client_id {{ client_id }}" - ) - ] - ), + panels.target( + f"{metric('rdkafka_topic_metadata_age')}", + "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}" + ) ] ), - panels.row_collapsed( - "Broker Level Metrics", + panels.timeseries_bytes( + "Topic Batch Size", + "Batch sizes in bytes", [ - broker_panels.timeseries_count( - "Message Count Pending to Transmit (per broker)", - "Number of messages awaiting transmission to broker", - [ - broker_panels.target( - f"{metric('rdkafka_broker_outbuf_msg_cnt')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" - ), - ] - ), - broker_panels.timeseries_count( - "Inflight Message Count (per broker)", - "Number of messages in-flight to broker awaiting response", - [ - broker_panels.target( - f"{metric('rdkafka_broker_waitresp_msg_cnt')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" - ) - ] - ), - broker_panels.timeseries_count( - "Error Count When Transmitting (per broker)", - "Total number of transmission errors", - [ - broker_panels.target( - f"{metric('rdkafka_broker_tx_errs')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" - ) - ] - ), - broker_panels.timeseries_count( - "Error Count When Receiving (per broker)", - "Total number of receive errors", - [ - 
broker_panels.target( - f"{metric('rdkafka_broker_rx_errs')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" - ) - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_avg')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - broker_panels.timeseries_count( - "Timeout Request Count (per broker)", - "Total number of requests timed out", - [ - broker_panels.target( - f"{metric('rdkafka_broker_req_timeouts')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}" - ) - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_p75')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - broker_panels.timeseries_latency_ms( - "RTT (per broker)", - "Broker latency / round-trip time in milli seconds", - [ - broker_panels.target( - f"{metric('rdkafka_broker_rtt_avg')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_rtt_p75')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_rtt_p90')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_rtt_p99')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_rtt_p99_99')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_rtt_out_of_range')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_p90')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - broker_panels.timeseries_latency_ms( - "Throttle Time (per broker)", - "Broker throttling time in milliseconds", - [ - broker_panels.target( - 
f"{metric('rdkafka_broker_throttle_avg')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_throttle_p75')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_throttle_p90')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_throttle_p99')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_throttle_p99_99')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - broker_panels.target( - f"{metric('rdkafka_broker_throttle_out_of_range')}/1000", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}", - ), - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_p99')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - ] - ), - panels.row_collapsed( - "Topic Level Metrics", - [ - topic_panels.timeseries_latency_ms( - "Topic Metadata_age Age", - "Age of metadata from broker for this topic (milliseconds)", - [ - topic_panels.target( - f"{metric('rdkafka_topic_metadata_age')}", - "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}" - ) - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_p99_99')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.timeseries_bytes( - "Topic Batch Size", - "Batch sizes in bytes", - [ - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_avg')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" - ), - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_p75')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" - ), - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_p90')}", - "id {{ id }}, client_id {{ client_id}}, broker 
{{ broker }}, topic {{ topic }}" - ), - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_p99')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" - ), - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_p99_99')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" - ), - topic_panels.target( - f"{metric('rdkafka_topic_batchsize_out_of_range')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" - ), - ] + panels.target( + f"{metric('rdkafka_topic_batchsize_out_of_range')}", + "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.timeseries_count( + panels.timeseries_count( "Topic Batch Messages", "Batch message counts", [ - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_avg')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_p75')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_p90')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_p99')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_p99_99')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), - topic_panels.target( + panels.target( f"{metric('rdkafka_topic_batchcnt_out_of_range')}", "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}" ), @@ -3097,51 +3119,46 @@ def section_kafka_native_metrics(outer_panels): ) ] ), - panels.row_collapsed( - "Partition Level Metrics", + panels.timeseries_count( + "Message to be Transmitted", + "Number of 
messages ready to be produced in transmit queue", [ - partition_panels.timeseries_count( - "Message to be Transmitted", - "Number of messages ready to be produced in transmit queue", - [ - partition_panels.target( - f"{metric('rdkafka_topic_partition_xmit_msgq_cnt')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}" - ), - ] - ), - partition_panels.timeseries_count( - "Message in pre fetch queue", - "Number of pre-fetched messages in fetch queue", - [ - partition_panels.target( - f"{metric('rdkafka_topic_partition_fetchq_cnt')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}" - ), - ] - ), - partition_panels.timeseries_count( - "Next offset to fetch", - "Next offset to fetch", - [ - partition_panels.target( - f"{metric('rdkafka_topic_partition_next_offset')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}" - ) - ] + panels.target( + f"{metric('rdkafka_topic_partition_xmit_msgq_cnt')}", + "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}" ), - partition_panels.timeseries_count( - "Committed Offset", - "Last committed offset", - [ - partition_panels.target( - f"{metric('rdkafka_topic_partition_committed_offset')}", - "id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}" - ) - ] + ] + ), + panels.timeseries_count( + "Message in pre fetch queue", + "Number of pre-fetched messages in fetch queue", + [ + panels.target( + f"{metric('rdkafka_topic_partition_fetchq_cnt')}", + "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}" ), ] ), + panels.timeseries_count( + "Next offset to fetch", + "Next offset to fetch", + [ + panels.target( + f"{metric('rdkafka_topic_partition_next_offset')}", + "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}" 
+ ) + ] + ), + panels.timeseries_count( + "Committed Offset", + "Last committed offset", + [ + panels.target( + f"{metric('rdkafka_topic_partition_committed_offset')}", + "id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}" + ) + ] + ), ] ) ] @@ -3203,12 +3220,32 @@ def section_memory_manager(outer_panels): ), ], ), + panels.timeseries_memory( + "The allocated memory of jvm", + "", + [ + panels.target( + f"{metric('jvm_allocated_bytes')}", + "", + ), + ], + ), + panels.timeseries_memory( + "The active memory of jvm", + "", + [ + panels.target( + f"{metric('jvm_active_bytes')}", + "", + ), + ], + ), panels.timeseries_ms( "LRU manager diff between current watermark and evicted watermark time (ms) for actors", "", [ panels.target( - f"{metric('lru_evicted_watermark_time_diff_ms')}", + f"{metric('lru_current_watermark_time_ms')} - on() group_right() {metric('lru_evicted_watermark_time_ms')}", "table {{table_id}} actor {{actor_id}} desc: {{desc}}", ), ], @@ -3248,6 +3285,143 @@ def section_connector_node(outer_panels): ) ] +def section_sink_metrics(outer_panels): + panels = outer_panels.sub_panel() + return [ + outer_panels.row_collapsed( + "Sink Metrics", + [ + panels.timeseries_latency( + "Commit Duration", + "", + [ + *quantile( + lambda quantile, legend: panels.target( + f"histogram_quantile({quantile}, sum(rate({metric('sink_commit_duration_bucket')}[$__rate_interval])) by (le, connector, sink_id))", + f"p{legend}" + " @ {{connector}} {{sink_id}}", + ), + [50, 99, "max"], + ), + panels.target( + f"sum by(le, connector, sink_id)(rate({metric('sink_commit_duration_sum')}[$__rate_interval])) / sum by(le, type, job, instance) (rate({metric('sink_commit_duration_count')}[$__rate_interval]))", + "avg - {{connector}} @ {{sink_id}}", + ), + ], + ), + panels.timeseries_id( + "Log Store Read/Write Epoch", + "", + [ + panels.target(f"{metric('log_store_latest_write_epoch')}", + "latest write epoch @ {{connector}} {{sink_id}} {{executor_id}}"), 
+ panels.target(f"{metric('log_store_latest_read_epoch')}", + "latest read epoch @ {{connector}} {{sink_id}} {{executor_id}}"), + ], + ), + panels.timeseries_latency( + "Log Store Lag", + "", + [ + panels.target(f"(max({metric('log_store_latest_write_epoch')}) by (connector, sink_id, executor_id)" + + f"- max({metric('log_store_latest_read_epoch')}) by (connector, sink_id, executor_id)) / (2^16) / 1000", + "Consume lag @ {{connector}} {{sink_id}} {{executor_id}}" + ), + ], + ), + panels.timeseries_latency( + "Log Store Consume Persistent Log Lag", + "", + [ + panels.target(f"clamp_min((max({metric('log_store_first_write_epoch')}) by (connector, sink_id, executor_id)" + + f"- max({metric('log_store_latest_read_epoch')}) by (connector, sink_id, executor_id)) / (2^16) / 1000, 0)", + "Consume persistent log lag @ {{connector}} {{sink_id}} {{executor_id}}" + ), + ], + ), + panels.timeseries_rowsps( + "Log Store Consume Throughput(rows)", + "", + [ + panels.target( + f"sum(rate({metric('log_store_read_rows')}[$__rate_interval])) by (connector, sink_id)", + "sink={{connector}} {{sink_id}}", + ), + ], + ), + panels.timeseries_rowsps( + "Executor Log Store Consume Throughput(rows)", + "", + [ + panels.target( + f"sum(rate({metric('log_store_read_rows')}[$__rate_interval])) by (instance, connector, sink_id, executor_id)", + "sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}", + ), + ], + ), + panels.timeseries_rowsps( + "Log Store Write Throughput(rows)", + "", + [ + panels.target( + f"sum(rate({metric('log_store_write_rows')}[$__rate_interval])) by (connector, sink_id)", + "sink={{connector}} {{sink_id}}", + ), + ], + ), + panels.timeseries_rowsps( + "Executor Log Store Write Throughput(rows)", + "", + [ + panels.target( + f"sum(rate({metric('log_store_write_rows')}[$__rate_interval])) by (instance, connector, sink_id, executor_id)", + "sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}", + ), + ], + ), + panels.timeseries_ops( + "Kv Log Store Read 
Storage Row Ops", + "", + [ + panels.target( + f"sum(rate({metric('kv_log_store_storage_read_count')}[$__rate_interval])) by (executor_id, connector, sink_id)", + "{{executor_id}} - {{connector}} @ {{sink_id}}", + ), + ], + ), + panels.timeseries_bytes( + "Kv Log Store Read Storage Size", + "", + [ + panels.target( + f"sum(rate({metric('kv_log_store_storage_read_size')}[$__rate_interval])) by (executor_id, connector, sink_id)", + "{{executor_id}} - {{connector}} @ {{sink_id}}", + ), + ] + ), + panels.timeseries_ops( + "Kv Log Store Write Storage Row Ops", + "", + [ + panels.target( + f"sum(rate({metric('kv_log_store_storage_write_count')}[$__rate_interval])) by (executor_id, connector, sink_id)", + "{{executor_id}} - {{connector}} @ {{sink_id}}", + ), + ], + ), + panels.timeseries_bytes( + "Kv Log Store Write Storage Size", + "", + [ + panels.target( + f"sum(rate({metric('kv_log_store_storage_write_size')}[$__rate_interval])) by (executor_id, connector, sink_id)", + "{{executor_id}} - {{connector}} @ {{sink_id}}", + ), + ] + ), + ], + ) + ] + def section_network_connection(outer_panels): panels = outer_panels.sub_panel() s3_filter = 'connection_type="S3"' @@ -3561,6 +3735,7 @@ def section_network_connection(outer_panels): *section_frontend(panels), *section_memory_manager(panels), *section_connector_node(panels), + *section_sink_metrics(panels), *section_kafka_native_metrics(panels), *section_network_connection(panels) ], diff --git a/grafana/risingwave-dev-dashboard.json b/grafana/risingwave-dev-dashboard.json index b57021d2cef98..9b9bb59829d23 100644 --- a/grafana/risingwave-dev-dashboard.json +++ b/grafana/risingwave-dev-dashboard.json @@ -1 +1 @@ -{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dev 
Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"table_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components 
alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (total) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (avg per core) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the rest are the followers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","query":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta Cluster","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The rate of successful recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery Successful 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of failed recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Failed recovery attempts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time spent in a successful recovery 
attempt","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency avg","metric":"","query":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Recovery","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by 
each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(rows).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":15,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(MB/s).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two consecutive barriers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":18,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is 
ready.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Upstream Status","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Source Split Change Events frequency by source_id and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Split Change Events frequency(events/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Kafka Consumer Lag Size by source_id, partition and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"high_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}}","metric":"","query":"high_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}} actor_id={{actor_id}}","metric":"","query":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kafka Consumer Lag Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(sink_name) (group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (actor_id, sink_name))) by (sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_name}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(sink_name) (group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (actor_id, sink_name))) by (sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill snapshot","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Snapshot Read Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been output from the backfill 
upstream","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Upstream Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of barriers that have been ingested but not completely processed. 
This metric reflects the current level of congestion within the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","query":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The duration between the time point when the scheduled barrier needs to be sent and the time point when the barrier 
gets actually sent to all the compute nodes. Developers can thus detect any internal congestion.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":27,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","query":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, 
instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","query":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","query":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":31,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","query":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of actors that have processed the earliest in-flight barriers per second. 
This metric helps users to detect potential congestion or stuck in the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Earliest In-Flight Barrier 
Progress","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":4},"height":null,"hideTimeOverride":false,"id":33,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When enabled, this metric shows the input throughput of each executor.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor 
{{actor_id}}->{{executor_identity}}","metric":"","query":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Backpressure","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory Usage 
(TaskLocalAlloc)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Memory Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Materialized View Memory 
Usage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":38,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Memory 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{upstream_fragment_id}}","metric":"","query":"rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":44,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_s
eries","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss, table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialize Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__
rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Over window cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":50,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","query":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per 
Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. This metric counts the number of join keys in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entries{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_entries{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Entries","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. 
This metric counts the number of rows in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":53,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_rows{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_rows{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. 
This metric counts the size of rows in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":54,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_estimated_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_estimated_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Estimated Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of matched rows on the opposite 
side","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":55,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Matched 
Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Lookup miss count counts the number of aggregation key's cache miss per second. Lookup total count counts the number of rows processed per second. By dividing these two metrics, one can derive the cache miss rate per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":56,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg total lookups - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":57,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":fals
e,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":58,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_keys{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cached keys count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_cached_keys{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_distinct_cached_keys{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg distinct cached keys count |table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_distinct_cached_keys{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached 
in each top_n executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":59,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n appendonly cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"TopN Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in temporal join executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":60,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal Join cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Cache Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in lookup executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":61,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lookup Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":62,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cached_entry_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cached entry count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cached_entry_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache lookup count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Executor Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":5},"height":null,"hideTimeOverride":false,"id":63,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps"
:[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":64,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":65,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_a
ctor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":66,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":67,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":8},"height":null,"hideTimeOverride":false,"id":68,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":8},"height":null,"hideTimeOverride":false,"id":69,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":70,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":16},"height":null,"hideTimeOverride":false,"id":71,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":16},"height":null,"hideTimeOverride":false,"id":72,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":73,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":24},"height":null,"hideTimeOverride":false,"id":74,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":24},"height":null,"hideTimeOverride":false,"id":75,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":76,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":32},"height":null,"hideTimeOverride":false,"id":77,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":32},"height":null,"hideTimeOverride":false,"id":78,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors 
(Tokio)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":6},"height":null,"hideTimeOverride":false,"id":79,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":80,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Send 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":81,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":7},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":83,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute 
Errors by Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":84,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by 
Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":85,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: actor_id={{actor_id}}, source_id={{source_id}})","metric":"","query":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Reader Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming 
Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":86,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":88,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"All memory usage of batch executors in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mem 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":90,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Heartbeat Worker 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":91,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next Duration","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":92,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":93,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Hummock has three parts of memory usage: 1. Meta Cache 2. Block CacheThis metric shows the real memory usage of each of these three caches.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":94,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":95,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$ta
ble|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Miss 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":96,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys 
flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":97,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p50 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, 
type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the latency of Get operations that have been issued to the state store.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":98,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, 
job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the time spent on iterator initialization.Histogram of the time spent on iterator scanning.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":99,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) 
(rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, 
sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":100,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter 
positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter check count- {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Positive / 
Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":101,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter positive rate - {{table_id}} - {{type}}","metric":"","query":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Positive 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"False-Positive / Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":102,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}}","metric":"","query":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(table_id,type)))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter False-Positive Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":103,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Slow Fetch Meta 
Unhits","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":104,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":105,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":106,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targe
ts":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":107,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Read 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":108,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of a single key-value pair when reading by operation Get.Operation Get gets a single key-value pair with respect to a caller-specified key. 
If the key does not exist in the storage, the size of key is counted into this metric and the size of value is 0.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":109,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of all the key-value pairs when reading by operation 
Iter.Operation Iter scans a range of key-value pairs.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":110,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":111,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) 
(rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":112,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Unhits","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Read)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":113,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the real memory usage of uploader.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":114,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading task size - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader Memory Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of time spent on compacting shared buffer to remote storage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":115,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) 
(rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":116,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Write Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":117,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"merge imm tasks - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploader spill tasks - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Tasks Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":118,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Merging tasks memory size - {{table_id}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploading tasks size - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Task Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":119,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDir
ection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":120,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":121,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus"
,"uid":"risedev-prometheus"},"expr":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":122,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / 
sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the statistics of mem_table size on flush. 
By default only max (p100) is shown.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":123,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table Size 
(Max)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":124,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Write)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":11},"height":null,"hideTimeOverride":false,"id":125,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":126,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size(KB) of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":127,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Size(KB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by commit epoch per
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":128,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{table_id}}","metric":"","query":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit Flush Bytes by Table","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":129,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":130,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have been 
skipped.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":131,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","query":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg l0 select_level_count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":132,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task L0 Select Level Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg file count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":133,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task File Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The distribution of the compact task size triggered, including p90 and max. 
and categorize it according to different cg, levels and task types.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":134,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task Size Distribution","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that are running.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":135,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","query":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compact-task: The total time have been spent on 
compaction.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":136,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute_apply_version_duration_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","query":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","query":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"KBs read from next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":137,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction. Flush refers to the process of compacting Memtables to SSTables at Level 0. Write refers to the process of compacting SSTables at one level to another
level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":138,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes(GiB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Write amplification is the amount of bytes written to the remote 
storage by compaction for each one byte of flushed SSTable data. Write amplification is by definition higher than 1.0 because we write each piece of data to L0, and then write it again to an SSTable, and then compaction may read this piece of data and write it to a new SSTable, that's another write.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":139,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write amplification","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables that is being 
compacted at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":140,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"num of 
compact_task","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":141,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task}}","metric":"","query":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting Task 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":142,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current 
level","metric":"","query":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read/Write by Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":143,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read/Write by level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_bloom_filter, for observing bloom_filter 
size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":144,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":145,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg count gotten from sstable_distinct_epoch_count, for observing 
sstable_distinct_epoch_count","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":146,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_epoch_count - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Stat","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total time of operations which read from remote storage when enable 
prefetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":147,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":148,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","query":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter 
keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"bytes of Lsm tree needed to reach balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":149,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","query":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compression ratio of each level of the lsm 
tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":150,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","query":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":12},"height":null,"hideTimeOverride":false,"id":151,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":152,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":153,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":154,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"
repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":155,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":156,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store
_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":157,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Retry Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"There are two types of operations: 1. GET, SELECT, and DELETE, they cost 0.0004 USD per 1000 requests. 2. PUT, COPY, POST, LIST, they cost 0.005 USD per 1000 requests.Reading from S3 across different regions impose extra cost. This metric assumes 0.01 USD per 1GB data transfer. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":158,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","query":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 
1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric uses the total size of data in S3 at this second to derive the cost of storing data for a whole month. The price is 0.023 USD per GB. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":159,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object 
Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":13},"height":null,"hideTimeOverride":false,"id":160,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":161,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":162,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":163,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"b
ottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":164,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} size 
@ {{instance}}","metric":"","query":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":165,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache hit ratio @ 
{{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Hit Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":166,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(refill_queue_total) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"refill queue length @ {{instance}}","metric":"","query":"sum(refill_queue_total) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Queue Length","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":167,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(data_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(data_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(data_refill_filtered_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data file cache refill - filtered @ 
{{instance}}","metric":"","query":"sum(rate(data_refill_filtered_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(meta_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(meta_refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":168,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - data file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(data_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - meta cache refill @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered 
Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":14},"height":null,"hideTimeOverride":false,"id":169,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":170,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, 
lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":171,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - 
{{method}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":172,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size{job=~\"$jo
b\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","query":"storage_version_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":173,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","query":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version 
id","metric":"","query":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","query":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","query":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":174,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasou
rce":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed epoch","metric":"","query":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","query":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","query":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":175,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"m
inSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":176,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":177,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\nObjects are classified into 3 groups:\n- not referenced by versions: these objects are being deleted from object store.\n- referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. 
long-running pinning). Thus those objects cannot be deleted at the moment.\n- referenced by current version: these objects are in the latest version.\n\nAdditionally, a metric on all objects (including dangling ones) is updated with low-frequency. The metric is updated right before full GC. So subsequent full GC may reduce the actual value significantly, without updating the metric.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":178,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects (including dangling ones)","metric":"","query":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Refer to `Object Total Number` panel for classification of 
objects.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":179,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects, including dangling ones","metric":"","query":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of hummock version delta log","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":180,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"delta log total 
number","metric":"","query":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Delta Log Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"hummock version checkpoint latency","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":181,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_pmax","metric":"","query":"histogram_quantile(1.0, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_avg","metric":"","query":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Checkpoint Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When certain per compaction group threshold is exceeded (e.g. number of level 0 sub-level in LSMtree), write op to that compaction group is stopped temporarily. 
Check log for detail reason of write stop.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":182,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compaction_group_{{compaction_group_id}}","metric":"","query":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Stop Compaction Groups","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of attempts to trigger full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":183,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_trigger_count","metric":"","query":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Trigger Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"the object id watermark used in last full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":184,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_last_object_id_watermark","metric":"","query":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Last 
Watermark","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":185,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Event Loop Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The times of move_state_table occurs","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":186,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by 
(group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"move table cg{{group}}","metric":"","query":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Move State Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of state_tables in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":187,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"state table cg{{group}}","metric":"","query":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"State Table 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of branched_sst in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":188,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"branched sst cg{{group}}","metric":"","query":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Branched SST Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":15},"height":null,"hideTimeOverride":false,"id":189,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total backup job count since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":190,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","query":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Latency of backup jobs since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":191,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","query":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - 
{{state}}","metric":"","query":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","query":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":192,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"
s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":193,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":194,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":195,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":196,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":197,"interval":"1s","li
nks":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":198,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risede
v-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":199,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":200,"interval
":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":201,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":202,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":19},"height":null,"hideTimeOverride":false,"id":203,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":204,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":205,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":206,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":207,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":20},"height":null,"hideTimeOverride":false,"id":208,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":209,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_inte
rval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":210,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","query":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":211,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":212,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":213,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":214,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":215,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","query":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":21},"height":null,"hideTimeOverride":false,"id":216,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":217,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{jo
b=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":218,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":219,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":220,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Running Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":221,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Rejected queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":222,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Completed Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":223,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Distributed Query Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":224,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":22},"height":null,"hideTimeOverride":false,"id":225,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":226,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":nul
l,"title":"LRU manager loop count per sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":227,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. 
The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":228,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now 
(ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":229,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":230,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":231,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_evicted_watermark_time_diff_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"lru_evicted_watermark_time_diff_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between current watermark and evicted watermark time (ms) for actors","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory 
manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":23},"height":null,"hideTimeOverride":false,"id":232,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":233,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ {{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":234,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":235,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":236,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time since this client instance was created (milli seconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_age{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":
"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_age{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Client Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current number of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current total size of messages in producer 
queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Size in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages transmitted (produced) to Kafka 
brokers","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Produced Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka 
brokers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Received Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster Level 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":237,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages awaiting transmission to broker","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count Pending to Transmit (per 
broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages in-flight to broker awaiting response","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Inflight Message Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of transmission 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Transmitting (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of receive 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Receiving (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of requests timed 
out","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Timeout Request Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker latency / round-trip time in milli 
seconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, 
broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"RTT (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker throttling time in 
milliseconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throttle Time (per broker)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Broker Level 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":238,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Age of metadata from broker for this topic (milliseconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}","metric":"","query":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Metadata_age 
Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch sizes in bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch message counts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch 
Messages","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Topic Level Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":239,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages ready to be produced in transmit queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition 
}}","metric":"","query":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message to be Transmitted","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of pre-fetched messages in fetch queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message in pre fetch 
queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Next offset to fetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Next offset to fetch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Last committed 
offset","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Committed Offset","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Partition Level Metrics","transformations":[],"transparent":false,"type":"row"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Kafka Native 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":25},"height":null,"hideTimeOverride":false,"id":240,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":241,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / 
(1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Network throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":242,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ 
{{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"S3 throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":243,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\
",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total write @ 
{{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"gRPC throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":244,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, 
error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} grpc {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"IO error 
rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":245,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, 
connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Existing connection count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":246,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, 
connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":247,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection err rate","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network connection","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(table_info, 
table_id)","description":"Reporting table id of the metric","hide":0,"includeAll":true,"label":"Table","multi":true,"name":"table","options":[],"query":{"query":"label_values(table_info, table_id)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dev_dashboard","uid":"Ecy3uV1nz","version":0} +{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dev Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment 
id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"table_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components 
alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (total) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage (avg per core) - {{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RW cluster can configure multiple meta nodes to achieve high availability. One is the leader and the rest are the followers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","query":"sum(meta_num{job=~\"$job\",instance=~\"$node\"}) by (worker_addr,role)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta Cluster","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":2},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The rate of successful recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery Successful 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of failed recovery attempts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Failed recovery attempts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Time spent in a successful recovery 
attempt","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(recovery_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"recovery latency avg","metric":"","query":"sum by (le) (rate(recovery_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by (le) (rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Recovery latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Recovery","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":3},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by 
each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(rows).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":15,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"rate(partition_input_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Each query is executed in parallel with a user-defined parallelism. This figure shows the throughput of each parallelism. 
The throughput of all the parallelism added up is equal to Source Throughput(MB/s).","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(MB/s) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"RisingWave ingests barriers periodically to trigger computation and checkpoints. The frequency of barrier can be set by barrier_interval_ms. 
This metric shows how many rows are ingested between two consecutive barriers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":18,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_rows_per_barrier_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is 
ready.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Upstream Status","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Source Split Change Events frequency by source_id and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_source_split_change_event_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Split Change Events frequency(events/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Kafka Consumer Lag Size by source_id, partition and 
actor_id","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"high_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}}","metric":"","query":"high_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}} partition={{partition}} actor_id={{actor_id}}","metric":"","query":"latest_message_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kafka Consumer Lag Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id, actor_id) * on(actor_id) group_left(sink_name) sink_info{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}} - actor {{actor_id}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id, actor_id) * on(actor_id) group_left(sink_name) sink_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s) per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (actor_id, table_id) * on(actor_id, table_id) group_left(table_name) table_info{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}} - actor {{actor_id}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (actor_id, table_id) * on(actor_id, table_id) group_left(table_name) table_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s) per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill 
snapshot","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Snapshot Read Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been output from the backfill 
upstream","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":27,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Upstream Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of barriers that have been ingested but not completely processed. 
This metric reflects the current level of congestion within the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","query":"in_flight_barrier_nums{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The duration between the time point when the scheduled barrier needs to be sent and the time point when the barrier 
gets actually sent to all the compute nodes. Developers can thus detect any internal congestion.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","query":"rate(meta_barrier_send_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":31,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","query":"histogram_quantile(0.9, 
sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, 
instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","query":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","query":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","query":"rate(meta_barrier_wait_commit_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of actors that have processed the earliest in-flight barriers per second. 
This metric helps users to detect potential congestion or stuck in the system.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"rate(stream_barrier_manager_progress{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Earliest In-Flight Barrier 
Progress","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":4},"height":null,"hideTimeOverride":false,"id":35,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When enabled, this metric shows the input throughput of each executor.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}} actor 
{{actor_id}}","metric":"","query":"rate(stream_executor_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment 
{{fragment_id}}->{{downstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output Blocking Time Ratio (Backpressure)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":38,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"actor_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory Usage 
(TaskLocalAlloc)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"stream_memory_usage{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Memory Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Materialized View Memory
Usage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Memory 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, upstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment {{fragment_id}}<-{{upstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_input_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, upstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_barrier_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_processing_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":44,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_actor_execution_time{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_in_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_out_record_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}} ","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_insert_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_s
eries","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss, table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total cached count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialize Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instanc
e=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache lookup count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_lookup_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_over_window_cache_miss_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / 
(sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Over window cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_over_window_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_over_window_cache_lookup_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} {{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, fragment_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le,fragment_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":53,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 
1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_join_actor_input_waiting_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":54,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","query":"rate(stream_join_match_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per 
Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Multiple rows with distinct primary keys may have the same join key. This metric counts the number of join keys in the executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":55,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of matched rows on the opposite 
side","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":56,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_matched_join_keys_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, fragment_id, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - fragment {{fragment_id}} table_id {{table_id}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, actor_id, table_id) (rate(stream_join_matched_join_keys_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, fragment_id, table_id) (rate(stream_join_matched_join_keys_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Matched 
Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Lookup miss count counts the number of aggregation key's cache miss per second.Lookup total count counts the number of rows processed per second.By diving these two metrics, one can derive the cache miss rate per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":57,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"distinct agg total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top n appendonly total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup executor cache 
miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup executor total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":58,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","
format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss - table {{table_id}} actor {{actor_id}}}","metric":"","query":"rate(stream_agg_chunk_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":59,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg cached keys count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_distinct_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg distinct cached keys count |table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_distinct_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of dirty (unflushed) groups in each hash aggregation executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":60,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_dirty_groups_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg dirty groups count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_dirty_groups_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Dirty Groups Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The total heap size of dirty (unflushed) groups in each hash aggregation executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":61,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_dirty_groups_heap_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stream agg dirty groups heap size | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_agg_dirty_groups_heap_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Dirty Groups Heap Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in each top_n executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":62,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"group top_n appendonly cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_group_top_n_appendonly_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"TopN Cached 
Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in temporal join executor's executor cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":63,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal Join cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_temporal_join_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Temporal Join Cache Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in lookup executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":64,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lookup cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_lookup_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lookup Cached Keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of keys cached in over window executor's executor 
cache.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":112},"height":null,"hideTimeOverride":false,"id":65,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_over_window_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"over window cached count | table {{table_id}} actor {{actor_id}}","metric":"","query":"stream_over_window_cached_entry_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Over Window Cached Keys","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":5},"height":null,"hideTimeOverride":false,"id":66,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":67,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":68,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":69,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_fast_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":70,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":8},"height":null,"hideTimeOverride":false,"id":71,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":8},"height":null,"hideTimeOverride":false,"id":72,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_slow_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":73,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":16},"height":null,"hideTimeOverride":false,"id":74,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":16},"height":null,"hideTimeOverride":false,"id":75,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_poll_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":76,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":24},"height":null,"hideTimeOverride":false,"id":77,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":24},"height":null,"hideTimeOverride":false,"id":78,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_idle_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":79,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":32},"height":null,"hideTimeOverride":false,"id":80,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":32},"height":null,"hideTimeOverride":false,"id":81,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(stream_actor_scheduled_cnt{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors 
(Tokio)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":6},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":83,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Send 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":84,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming 
Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":7},"height":null,"hideTimeOverride":false,"id":85,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":86,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute 
Errors by Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by 
Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":88,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: actor_id={{actor_id}}, source_id={{source_id}})","metric":"","query":"sum(user_source_reader_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, actor_id, source_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Reader Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming 
Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":90,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":91,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_task_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"All memory usage of batch executors in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":92,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_total_mem{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mem 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":93,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_heartbeat_worker_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Heartbeat Worker Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the memory usage of 
mem_table.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":94,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table size total - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table size - table id {{table_id}} instance id {{instance_id}} {{job}} @ {{instance}}","metric":"","query":"state_store_mem_table_memory_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the item counts in mem_table.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":95,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table counts total - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mem_table count - table id {{table_id}} instance id {{instance_id}} {{job}} @ 
{{instance}}","metric":"","query":"state_store_mem_table_item_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Mem Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":96,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next 
Duration","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":97,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":98,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Hummock has three parts of memory usage: 1. Meta Cache 2. 
Block CacheThis metric shows the real memory usage of each of these three caches.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":99,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":100,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Miss 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":101,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_iter_scan_key_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type, table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys 
flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":102,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p50 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, 
type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_iter_merge_sstable_counts_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the latency of Get operations that have been issued to the state store.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":103,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, 
job, instance, table_id) (rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of the time spent on iterator initialization.Histogram of the time spent on iterator scanning.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":104,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_init_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_init_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) 
(rate(state_store_iter_init_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, 
sum(rate(state_store_iter_scan_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":105,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter 
positive count - {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter check count- {{table_id}} - {{type}}","metric":"","query":"sum(irate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Positive / 
Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":106,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter positive rate - {{table_id}} - {{type}}","metric":"","query":"(sum(rate(state_store_read_req_bloom_filter_positive_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter Positive 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"False-Positive / Total","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":107,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}}","metric":"","query":"(((sum(rate(state_store_read_req_positive_but_non_exist_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(table_id,type)))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Bloom Filter False-Positive Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":108,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_slow_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Slow Fetch Meta 
Unhits","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":109,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(state_store_get_shared_buffer_hit_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_in_process_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":110,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, 
sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":111,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targe
ts":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":112,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.5, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.5, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by 
(materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id) + sum((histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Read 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":113,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of a single key-value pair when reading by operation Get.Operation Get gets a single key-value pair with respect to a caller-specified key. 
If the key does not exist in the storage, the size of key is counted into this metric and the size of value is 0.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":114,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size of all the key-value pairs when reading by operation 
Iter.Operation Iter scans a range of key-value pairs.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":115,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":116,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id) 
(rate(state_store_iter_fetch_meta_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":117,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"state_store_iter_fetch_meta_cache_unhits{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Unhits","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
(Read)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":118,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the real memory usage of uploader.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":119,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading task size - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_uploader_uploading_task_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader Memory Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Histogram of time spent on compacting shared buffer to remote storage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":120,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) 
(rate(state_store_sync_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":121,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.5, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p50 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.5, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write p99 - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(0.99, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write pmax - materialized view {{materialized_view_id}}","metric":"","query":"sum(histogram_quantile(1.0, 
sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id)) * on(table_id) group_left(materialized_view_id) (group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Write Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":122,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"merge imm tasks - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_merge_imm_task_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploader spill tasks - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_spill_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Tasks Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":123,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Merging tasks memory size - {{table_id}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_merge_imm_memory_sz{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Uploading tasks size - {{uploader_stage}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_spill_task_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,uploader_stage)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Uploader - Task Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":124,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDir
ection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_sync_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":125,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance, 
table_id)(rate(state_store_write_batch_duration_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":126,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus"
,"uid":"risedev-prometheus"},"expr":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","query":"sum(irate(state_store_write_batch_tuple_counts{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":127,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / 
sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_write_batch_size_sum{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id) / sum(rate(state_store_write_batch_size_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_shared_buffer_to_sstable_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance) / sum(rate(compactor_shared_buffer_to_sstable_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric shows the statistics of mem_table size on flush. 
By default only max (p100) is shown.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":128,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, table_id, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_write_batch_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, table_id, job, instance) (rate(state_store_write_batch_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ 
{{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_write_batch_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, table_id, job, instance) (rate(state_store_write_batch_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Batch Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":129,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock (Write)","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":11},"height":null,"hideTimeOverride":false,"id":130,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":131,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (instance, 
level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The size(KB) of SSTables at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":132,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SSTable Size(KB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The of bytes that have been written by commit epoch per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":133,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{table_id}}","metric":"","query":"sum(rate(storage_commit_write_throughput{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit Flush Bytes by Table","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":134,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result!='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have completed or 
failed","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":135,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency{result='SUCCESS',job=~\"$job\",instance=~\"$node\"}) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that have been 
skipped.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":136,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","query":"sum(rate(storage_skip_compact_frequency{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (level, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg l0 select_level_count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":137,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_l0_compact_level_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_l0_compact_level_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task L0 Select Level Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg file count of the compact task, and categorize it according to different cg, levels and task 
types","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":138,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg cg{{group}}@{{type}}","metric":"","query":"sum by(le, group, type)(irate(storage_compact_task_file_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, group, type)(irate(storage_compact_task_file_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task File Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The distribution of the compact task size triggered, including p90 and max. 
and categorize it according to different cg, levels and task types.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":139,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - cg{{group}}@{{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(storage_compact_task_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Task Size Distribution","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of compactions from one level to another level that are running.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":140,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","query":"avg(storage_compact_task_pending_num{job=~\"$job\",instance=~\"$node\"}) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compact-task: The total time have been spent on 
compaction.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":141,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_task_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compact_sst_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute_apply_version_duration_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(compute_refill_cache_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","query":"sum by(le)(rate(compactor_compact_task_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","query":"sum by(le)(rate(state_store_compact_sst_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum 
by(le)(rate(state_store_compact_sst_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"KBs read from next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":142,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job) + sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(rate(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_fast_compact_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fast compact - {{job}}","metric":"","query":"sum(rate(compactor_fast_compact_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Write refers to the process of compacting SSTables at one level to another 
level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":143,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes(GiB)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Write amplification is the amount of bytes written to the remote 
storage by compaction for each one byte of flushed SSTable data. Write amplification is by definition higher than 1.0 because we write each piece of data to L0, and then write it again to an SSTable, and then compaction may read this piece of data and write it to a new SSTable, that's another write.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":144,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write amplification","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) / sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"})","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of SSTables that is being 
compacted at each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":145,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"storage_level_compact_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SSTable Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"num of 
compact_task","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":146,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task}}","metric":"","query":"storage_level_compact_task_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting Task 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":147,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(rate(storage_level_compact_read_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current 
level","metric":"","query":"sum(rate(storage_level_compact_read_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(rate(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read/Write by Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":148,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, 
group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} write to next level","metric":"","query":"sum(irate(storage_level_compact_write_sstn{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from next level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_next{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cg{{group}}-L{{level_index}} read from current level","metric":"","query":"sum(irate(storage_level_compact_read_sstn_curr{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, group, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read/Write by level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_bloom_filter, for observing bloom_filter 
size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":149,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":150,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, 
instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Avg count gotten from sstable_distinct_epoch_count, for observing 
sstable_distinct_epoch_count","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":151,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_epoch_count - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Stat","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total time of operations which read from remote storage when enable 
prefetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":152,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":153,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","query":"sum(rate(compactor_iter_scan_key_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter 
keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"bytes of Lsm tree needed to reach balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":154,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","query":"sum(storage_compact_pending_bytes{job=~\"$job\",instance=~\"$node\"}) by (instance, group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"compression ratio of each level of the lsm 
tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":155,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","query":"sum(storage_compact_level_compression_ratio{job=~\"$job\",instance=~\"$node\"}) by (instance, group, level, algorithm)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":12},"height":null,"hideTimeOverride":false,"id":156,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":157,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ 
{{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":158,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum{type!~'streaming_upload_write_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":159,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"
repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type!~'streaming_upload_write_bytes|streaming_read_read_bytes|streaming_read',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'upload|delete',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":160,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":161,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store
_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":162,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(aws_sdk_retry_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(irate(s3_read_request_retry_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Retry Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"There are two types of operations: 1. GET, SELECT, and DELETE, they cost 0.0004 USD per 1000 requests. 2. PUT, COPY, POST, LIST, they cost 0.005 USD per 1000 requests.Reading from S3 across different regions impose extra cost. This metric assumes 0.01 USD per 1GB data transfer. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":163,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","query":"sum(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}) * 0.01 / 1000 / 1000 / 1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete',job=~\"$job\",instance=~\"$node\"}) * 0.0004 / 
1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list',job=~\"$job\",instance=~\"$node\"}) * 0.005 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"This metric uses the total size of data in S3 at this second to derive the cost of storing data for a whole month. The price is 0.023 USD per GB. 
Please checkout AWS's pricing model for more accurate calculation.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":164,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","query":"sum(storage_level_total_file_size{job=~\"$job\",instance=~\"$node\"}) by (instance) * 0.023 / 1000 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object 
Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":13},"height":null,"hideTimeOverride":false,"id":165,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":166,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":167,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(foyer_storage_op_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, foyer, op, extra, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":168,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"b
ottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache - {{op}} {{extra}} @ {{instance}}","metric":"","query":"sum(rate(foyer_storage_op_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, op, extra, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":169,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} size 
@ {{instance}}","metric":"","query":"sum(foyer_storage_total_bytes{job=~\"$job\",instance=~\"$node\"}) by (foyer, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":170,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{foyer}} file cache hit ratio @ 
{{instance}}","metric":"","query":"sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) / (sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"hit\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance) + sum(rate(foyer_storage_op_duration_count{op=\"lookup\",extra=\"miss\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (foyer, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Hit Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":171,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(refill_queue_total) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"refill queue length @ {{instance}}","metric":"","query":"sum(refill_queue_total) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Queue Length","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":172,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"sum(rate(refill_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(refill_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} file cache refill - {{op}} @ 
{{instance}}","metric":"","query":"sum(rate(refill_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (type, op, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":173,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{type}} file cache refill - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(refill_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, type, op, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Refill Latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered 
Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":14},"height":null,"hideTimeOverride":false,"id":174,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":175,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, 
lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":176,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - 
{{method}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":177,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size{job=~\"$jo
b\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","query":"storage_version_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":178,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","query":"storage_current_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version 
id","metric":"","query":"storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","query":"storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","query":"storage_min_safepoint_version_id{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":179,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasou
rce":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed epoch","metric":"","query":"storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","query":"storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","query":"storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":180,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"m
inSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_value_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":181,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":182,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_count',table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\nObjects are classified into 3 groups:\n- not referenced by versions: these object are being deleted from object store.\n- referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. 
long-running pinning). Thus those objects cannot be deleted at the moment.\n- referenced by current version: these objects are in the latest version.\n\nAdditionally, a metric on all objects (including dangling ones) is updated with low-frequency. The metric is updated right before full GC. So subsequent full GC may reduce the actual value significantly, without updating the metric.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":183,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects (including dangling ones)","metric":"","query":"storage_total_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Refer to `Object Total Number` panel for classification of 
objects.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":184,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all objects, including dangling ones","metric":"","query":"storage_total_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of hummock version delta log","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":185,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"delta log total 
number","metric":"","query":"storage_delta_log_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Delta Log Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"hummock version checkpoint latency","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":186,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_pmax","metric":"","query":"histogram_quantile(1.0, 
sum(rate(storage_version_checkpoint_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version_checkpoint_latency_avg","metric":"","query":"rate(storage_version_checkpoint_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(storage_version_checkpoint_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Checkpoint Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"When certain per compaction group threshold is exceeded (e.g. number of level 0 sub-level in LSMtree), write op to that compaction group is stopped temporarily. 
Check log for detail reason of write stop.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":187,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compaction_group_{{compaction_group_id}}","metric":"","query":"storage_write_stop_compaction_groups{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Stop Compaction Groups","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"total number of attempts to trigger full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":188,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_trigger_count","metric":"","query":"storage_full_gc_trigger_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Trigger Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"the object id watermark used in last full 
GC","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":189,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"full_gc_last_object_id_watermark","metric":"","query":"storage_full_gc_last_object_id_watermark{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Full GC Last 
Watermark","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":190,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency p99 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(storage_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor consumed latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_consumed_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor iteration latency pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(irate(compactor_compaction_event_loop_iteration_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Event Loop Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The times of move_state_table occurs","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":191,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by 
(group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"move table cg{{group}}","metric":"","query":"sum(storage_move_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Move State Table Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of state_tables in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":192,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"state table cg{{group}}","metric":"","query":"sum(irate(storage_state_table_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"State Table 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of branched_sst in each CG","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":193,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"branched sst cg{{group}}","metric":"","query":"sum(irate(storage_branched_sst_count{table_id=~\"$table|\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Branched SST Count","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":15},"height":null,"hideTimeOverride":false,"id":194,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total backup job count since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":195,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","query":"backup_job_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Latency of backup jobs since the Meta node starts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":196,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","query":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - 
{{state}}","metric":"","query":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","query":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":197,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"
s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":198,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":199,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null
,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":200,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":201,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":202,"interval":"1s","li
nks":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":203,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risede
v-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":204,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":205,"interval
":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":206,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":207,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":19},"height":null,"hideTimeOverride":false,"id":208,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":209,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":210,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":211,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":212,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds',job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":20},"height":null,"hideTimeOverride":false,"id":213,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":214,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_inte
rval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_report_compaction_task_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":215,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","query":"sum(irate(state_store_unpin_version_before_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_version_before_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":216,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_pin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_unpin_snapshot_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, 
sum(irate(state_store_unpin_snapshot_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":217,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_pin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) 
by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_unpin_snapshot_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":218,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":219,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":220,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","query":"sum(irate(state_store_report_compaction_task_latency_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":21},"height":null,"hideTimeOverride":false,"id":221,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":222,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{jo
b=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":223,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":224,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per Second (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":225,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Running Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":226,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Rejected queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":227,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The Number of Completed Queries (Distributed Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":228,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Distributed Query Mode)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":229,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p95 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency (Local Query 
Mode)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":22},"height":null,"hideTimeOverride":false,"id":230,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":231,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(lru_runtime_loop_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":nul
l,"title":"LRU manager loop count per sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":232,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_watermark_step{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. 
The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":233,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_physical_now_ms{job=~\"$job\",instance=~\"$node\"} - lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now 
(ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":234,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":235,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":236,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jvm_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jvm_allocated_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The allocated memory of 
jvm","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":237,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jvm_active_bytes{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jvm_active_bytes{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The active memory of 
jvm","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":238,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"} - on() group_right() lru_evicted_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table {{table_id}} actor {{actor_id}} desc: {{desc}}","metric":"","query":"lru_current_watermark_time_ms{job=~\"$job\",instance=~\"$node\"} - on() group_right() lru_evicted_watermark_time_ms{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between current watermark and evicted watermark time (ms) for actors","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory 
manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":23},"height":null,"hideTimeOverride":false,"id":239,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":240,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ {{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":241,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":242,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":243,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(0.5, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, 
sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(0.99, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax @ {{connector}} {{sink_id}}","metric":"","query":"histogram_quantile(1.0, sum(rate(sink_commit_duration_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, connector, sink_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, connector, sink_id)(rate(sink_commit_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, connector, sink_id) (rate(sink_commit_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{connector}} @ {{sink_id}}","metric":"","query":"sum by(le, connector, sink_id)(rate(sink_commit_duration_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, connector, sink_id) (rate(sink_commit_duration_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Commit 
Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":244,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"latest write epoch @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"latest read epoch @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Read/Write 
Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":245,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(max(log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Consume lag @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"(max(log_store_latest_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store 
Lag","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":246,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"clamp_min((max(log_store_first_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000, 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Consume persistent log lag @ {{connector}} {{sink_id}} {{executor_id}}","metric":"","query":"clamp_min((max(log_store_first_write_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)- max(log_store_latest_read_epoch{job=~\"$job\",instance=~\"$node\"}) by (connector, sink_id, executor_id)) / (2^16) / 1000, 0)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Consume Persistent Log 
Lag","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":247,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}}","metric":"","query":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Consume 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":248,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}","metric":"","query":"sum(rate(log_store_read_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Log Store Consume 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":249,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}}","metric":"","query":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Log Store Write 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":250,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector}} {{sink_id}} @ {{executor_id}} {{instance}}","metric":"","query":"sum(rate(log_store_write_rows{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, connector, sink_id, executor_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Log Store Write 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":251,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_read_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_read_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Read Storage Row 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":252,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_read_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_read_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Read Storage 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":253,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_write_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_write_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Write Storage Row 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":254,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(kv_log_store_storage_write_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_id}} - {{connector}} @ {{sink_id}}","metric":"","query":"sum(rate(kv_log_store_storage_write_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_id, connector, sink_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Kv Log Store Write Storage Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Sink 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":25},"height":null,"hideTimeOverride":false,"id":255,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current number of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":256,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count in Producer 
Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Current total size of messages in producer queues","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":257,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_msg_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Size in Producer Queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages transmitted (produced) to Kafka 
brokers","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":258,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_tx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Produced Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka 
brokers.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":259,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id }}","metric":"","query":"rdkafka_top_rx_msgs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Received Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages awaiting transmission to 
broker","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":260,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_outbuf_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message Count Pending to Transmit (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages in-flight to broker awaiting 
response","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":261,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_waitresp_msg_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Inflight Message Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of transmission 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":262,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_tx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Transmitting (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of receive 
errors","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":263,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_rx_errs{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Error Count When Receiving (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of requests timed 
out","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":264,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, state {{ state }}","metric":"","query":"rdkafka_broker_req_timeouts{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Timeout Request Count (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker latency / round-trip time in milli 
seconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":265,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, 
broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_rtt_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"RTT (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Broker throttling time in 
milliseconds","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":266,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_avg{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p75{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p90{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_p99_99{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}","metric":"","query":"rdkafka_broker_throttle_out_of_range{job=~\"$job\",instance=~\"$node\"}/1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throttle Time (per broker)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Age of metadata from broker for this topic 
(milliseconds)","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ms"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":267,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}","metric":"","query":"rdkafka_topic_metadata_age{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Metadata_age Age","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch sizes in 
bytes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":268,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic 
}}","metric":"","query":"rdkafka_topic_batchsize_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchsize_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Batch message 
counts","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":null,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_avg{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p75{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id 
}}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p90{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_p99_99{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, broker {{ broker }}, topic {{ topic }}","metric":"","query":"rdkafka_topic_batchcnt_out_of_range{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Topic Batch Messages","transformations":[],"transparent":false,"type":"timeseries"}],"timeFrom":null,"timeShift":null,"title":"Topic Batch Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of messages ready to be produced in transmit 
queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":269,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_xmit_msgq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message to be Transmitted","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of pre-fetched messages in fetch 
queue","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":270,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_fetchq_cnt{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Message in pre fetch queue","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Next offset to 
fetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":271,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_next_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Next offset to fetch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Last committed 
offset","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":272,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"id {{ id }}, client_id {{ client_id}}, topic {{ topic }}, partition {{ partition }}","metric":"","query":"rdkafka_topic_partition_committed_offset{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Committed Offset","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Kafka Native 
Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":273,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":274,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / 
(1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Network throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":275,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} read @ 
{{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"S3 throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":276,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\
",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} write @ {{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total read @ {{instance}}","metric":"","query":"sum(rate(connection_read_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total write @ 
{{instance}}","metric":"","query":"sum(rate(connection_write_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance) / (1024*1024)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"gRPC throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":277,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(irate(connection_io_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, 
error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} grpc {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} total {{op_type}} err[{{error_kind}}] @ {{instance}}","metric":"","query":"sum(rate(connection_io_err_rate{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, op_type, error_kind)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"IO error 
rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":278,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(connection_count{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}) by (job, instance, 
connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Existing connection count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":279,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, 
connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_create_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":280,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} S3 @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=\"S3\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} {{connection_type}} @ {{instance}}","metric":"","query":"sum(irate(connection_err_rate{connection_type=~\"grpc.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, connection_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create new connection err rate","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network connection","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(table_info, 
table_id)","description":"Reporting table id of the metric","hide":0,"includeAll":true,"label":"Table","multi":true,"name":"table","options":[],"query":{"query":"label_values(table_info, table_id)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dev_dashboard","uid":"Ecy3uV1nz","version":0} diff --git a/grafana/risingwave-user-dashboard.dashboard.py b/grafana/risingwave-user-dashboard.dashboard.py index dfe12c34904a5..c2b192c56f085 100644 --- a/grafana/risingwave-user-dashboard.dashboard.py +++ b/grafana/risingwave-user-dashboard.dashboard.py @@ -73,22 +73,22 @@ def section_overview(panels): ], ), panels.timeseries_rowsps( - "Aggregated Sink Throughput(rows/s)", - "The figure shows the number of rows output by each sink per second.", + "Sink Throughput(rows/s)", + "The number of rows streamed into each sink per second.", [ panels.target( - f"sum(rate({metric('stream_executor_row_count', filter=sink_filter)}[$__rate_interval])) by (executor_identity)", - "{{executor_identity}}", + f"sum(rate({metric('stream_sink_input_row_count')}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group({metric('sink_info')}) by (sink_id, sink_name)", + "sink {{sink_id}} {{sink_name}}", ), ], ), panels.timeseries_rowsps( - "Aggregated Materialized View Throughput(rows/s)", - "The figure shows the number of rows output by each materialized view per second.", + "Materialized View Throughput(rows/s)", + "The figure shows the number of rows written into each materialized view per second.", [ panels.target( - f"sum(rate({metric('stream_executor_row_count', filter=mv_filter)}[$__rate_interval])) by (executor_identity)", - "{{executor_identity}}", + 
f"sum(rate({metric('stream_mview_input_row_count')}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group({metric('table_info')}) by (table_id, table_name)", + "mview {{table_id}} {{table_name}}", ), ], ), @@ -328,33 +328,49 @@ def section_memory(outer_panels): f"rate({metric('stream_agg_lookup_miss_count')}[$__rate_interval])", "Agg - cache miss - table {{table_id}} actor {{actor_id}}", ), + panels.target( + f"rate({metric('stream_agg_lookup_total_count')}[$__rate_interval])", + "Agg - total lookups - table {{table_id}} actor {{actor_id}}", + ), panels.target( f"rate({metric('stream_agg_distinct_cache_miss_count')}[$__rate_interval])", - "Distinct agg cache miss - table {{table_id}} actor {{actor_id}}", + "Distinct agg - cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_agg_distinct_total_cache_count')}[$__rate_interval])", + "Distinct agg - total lookups - table {{table_id}} actor {{actor_id}}", ), panels.target( f"rate({metric('stream_group_top_n_cache_miss_count')}[$__rate_interval])", - "Group top n cache miss - table {{table_id}} actor {{actor_id}}", + "Group top n - cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_group_top_n_total_query_cache_count')}[$__rate_interval])", + "Group top n - total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_group_top_n_appendonly_cache_miss_count')}[$__rate_interval])", - "Group top n appendonly cache miss - table {{table_id}} actor {{actor_id}}", + "Group top n appendonly - cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_group_top_n_appendonly_total_query_cache_count')}[$__rate_interval])", + "Group top n appendonly - total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_lookup_cache_miss_count')}[$__rate_interval])", - "Lookup executor cache miss - table 
{{table_id}} actor {{actor_id}}", + "Lookup executor - cache miss - table {{table_id}} actor {{actor_id}}", + ), + panels.target( + f"rate({metric('stream_lookup_total_query_cache_count')}[$__rate_interval])", + "Lookup executor - total lookups - table {{table_id}} actor {{actor_id}}", ), - panels.target( f"rate({metric('stream_temporal_join_cache_miss_count')}[$__rate_interval])", - "temporal join cache miss - table_id {{table_id}} actor {{actor_id}}", + "Temporal join - cache miss - table_id {{table_id}} actor {{actor_id}}", ), - panels.target( - f"rate({metric('stream_agg_lookup_total_count')}[$__rate_interval])", - "Agg - total lookups - table {{table_id}} actor {{actor_id}}", + f"rate({metric('stream_temporal_join_total_query_cache_count')}[$__rate_interval])", + "Temporal join - total lookups - table_id {{table_id}} actor {{actor_id}}", ), panels.target( f"rate({metric('stream_materialize_cache_hit_count')}[$__rate_interval])", @@ -386,22 +402,18 @@ def section_memory(outer_panels): f"(sum(rate({metric('stream_group_top_n_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_group_top_n_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_group_top_n_appendonly_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_group_top_n_appendonly_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"(sum(rate({metric('stream_lookup_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_lookup_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( 
f"(sum(rate({metric('stream_temporal_join_cache_miss_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_temporal_join_total_query_cache_count')}[$__rate_interval])) by (table_id, actor_id))", "Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ", ), - panels.target( f"1 - (sum(rate({metric('stream_materialize_cache_hit_count')}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate({metric('stream_materialize_cache_total_count')}[$__rate_interval])) by (table_id, actor_id))", "materialize executor cache miss ratio - table {{table_id}} - actor {{actor_id}} {{instance}}", @@ -672,14 +684,14 @@ def section_streaming(outer_panels): ], ), panels.timeseries_percentage( - "Actor Backpressure", + "Actor Output Blocking Time Ratio (Backpressure)", "We first record the total blocking duration(ns) of output buffer of each actor. It shows how " "much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, " "on average. 
Then we divide this duration by 1 second and show it as a percentage.", [ panels.target( - f"rate({metric('stream_actor_output_buffer_blocking_duration_ns')}[$__rate_interval]) / 1000000000", - "{{actor_id}}", + f"avg(rate({metric('stream_actor_output_buffer_blocking_duration_ns')}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000", + "fragment {{fragment_id}}->{{downstream_fragment_id}}", ), ], ), diff --git a/grafana/risingwave-user-dashboard.json b/grafana/risingwave-user-dashboard.json index 0044f24f3313f..63e3ed095ac69 100644 --- a/grafana/risingwave-user-dashboard.json +++ b/grafana/risingwave-user-dashboard.json @@ -1 +1 @@ -{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment 
id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, 
table_type)","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Overview","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{source_name}}","metric":"","query":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":10},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id {{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*SinkExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows output by each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{executor_identity}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (executor_identity)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. the computation results are made durable into materialized views or sink to external systems. 
This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Alerts in the system group by type:\n - Too Many Barriers: there are too many uncommitted barriers generated. This means the streaming graph is stuck or under heavy load. Check 'Barrier Latency' panel.\n - Recovery Triggered: cluster recovery is triggered. Check 'Errors by Type' / 'Node Count' panels.\n - Lagging Version: the checkpointed or pinned version id is lagging behind the current version id. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Epoch: the pinned or safe epoch is lagging behind the current max committed epoch. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Compaction: there are too many files in L0. This can be caused by compactor failure or lag of compactor resource. Check 'Compaction' section in dev dashboard.\n - Lagging Vacuum: there are too many stale files waiting to be cleaned. This can be caused by compactor failure or lag of compactor resource. 
Check 'Compaction' section in dev dashboard.\n - Abnormal Meta Cache Memory: the meta cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Block Cache Memory: the block cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Uploading Memory Usage: uploading memory is more than 70 percent of the expected, and is about to spill.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Too Many Barriers","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recovery 
Triggered","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Version","metric":"","query":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Epoch","metric":"","query":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - 
storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Compaction","metric":"","query":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Vacuum","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Meta Cache Memory","metric":"","query":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Block Cache 
Memory","metric":"","query":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Uploading Memory Usage","metric":"","query":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Alerts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Errors in the system group by type","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, 
executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute error {{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"parse error {{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source error: source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote storage error {{type}}: {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Errors","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Local mode","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distributed 
mode","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Query QPS","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions in frontend 
nodes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of CPU cores per RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Core 
Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"CPU","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":51},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage 
(Total)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"streaming actor - {{actor_id}}","metric":"","query":"rate(actor_memory_usage[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage meta cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage block cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage write buffer - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage (Detailed)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Executor cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - total lookups - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly cache miss - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"temporal join cache miss - table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - cache hit count - table {{table_id}} - actor {{actor_id}} 
{{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - total cache count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, 
actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - 
table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / 
(sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - 
(sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"memory cache - {{table_id}} @ {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage bloom filter statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter total - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Bloom Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage file cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, 
instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage File Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":52},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Send/Recv throughput per node for streaming 
exchange","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Send @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recv @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Streaming Remote Exchange 
(Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput per node","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Exchange Recv 
(Rows/s)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":53},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n Objects are classified into 3 groups:\n - not referenced by versions: these objects are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n 
","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The storage size of each materialized view","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n 
Objects are classified into 3 groups:\n - not referenced by versions: these objects are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Compaction refers to the process of compacting SSTables at one level to another level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$no
de\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Compaction - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Size statistics for checkpoint","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}}","metric":"","query":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":54},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized executor actor per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) 
(group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill operator used by MV on MV","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Read Snapshot - table_id={{table_id}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Upstream - table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. 
Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor 
Backpressure","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":55},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution 
mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ 
{{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Fcy3uV1nz","version":0} +{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Actor/Table Id 
Info","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from actor id to fragment id","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"actor_info{job=~\"$job\",instance=~\"$node\"}","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"actor_info{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Id Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"color":{"mode":"thresholds"},"columns":[],"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Mapping from materialized view table id to it's internal table 
ids","editable":true,"error":false,"fieldConfig":{"defaults":{"custom":{"align":"auto","displayMode":"auto","filterable":true},"thresholds":{"mode":"absolute","steps":[]}},"overrides":[]},"fontSize":"100%","gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"mappings":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"showHeader":true},"repeat":null,"repeatDirection":null,"span":6,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","format":"table","hide":false,"instant":true,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"group(table_info{job=~\"$job\",instance=~\"$node\"}) by (materialized_view_id, table_id, table_name, table_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Info","transformations":[{"id":"organize","options":{"excludeByName":{"Time":true,"Value":true,"__name__":true,"instance":true,"job":true}}}],"transparent":false,"type":"table"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Overview","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":10},"height":null,"hideTimeOverride":false,"id":5,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{source_name}}","metric":"","query":"sum(rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (source_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":10},"height":null,"hideTimeOverride":false,"id":6,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source_id {{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregated Source Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of rows streamed into each sink per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink {{sink_id}} {{sink_name}}","metric":"","query":"sum(rate(stream_sink_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (sink_id) * on(sink_id) group_left(sink_name) group(sink_info{job=~\"$job\",instance=~\"$node\"}) by (sink_id, sink_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Sink Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized view per 
second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"mview {{table_id}} {{table_name}}","metric":"","query":"sum(rate(stream_mview_input_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id) * on(table_id) group_left(table_name) group(table_info{job=~\"$job\",instance=~\"$node\"}) by (table_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The time that the data between two consecutive barriers gets fully processed, i.e. 
the computation results are made durable into materialized views or sink to external systems. This metric shows to users the freshness of materialized views.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, 
sum(rate(meta_barrier_duration_seconds_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) / rate(meta_barrier_duration_seconds_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Alerts in the system group by type:\n - Too Many Barriers: there are too many uncommitted barriers generated. This means the streaming graph is stuck or under heavy load. Check 'Barrier Latency' panel.\n - Recovery Triggered: cluster recovery is triggered. Check 'Errors by Type' / 'Node Count' panels.\n - Lagging Version: the checkpointed or pinned version id is lagging behind the current version id. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Epoch: the pinned or safe epoch is lagging behind the current max committed epoch. Check 'Hummock Manager' section in dev dashboard.\n - Lagging Compaction: there are too many files in L0. This can be caused by compactor failure or lag of compactor resource. Check 'Compaction' section in dev dashboard.\n - Lagging Vacuum: there are too many stale files waiting to be cleaned. This can be caused by compactor failure or lag of compactor resource. 
Check 'Compaction' section in dev dashboard.\n - Abnormal Meta Cache Memory: the meta cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Block Cache Memory: the block cache memory usage is too large, exceeding the expected 10 percent.\n - Abnormal Uploading Memory Usage: uploading memory is more than 70 percent of the expected, and is about to spill.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Too Many Barriers","metric":"","query":"all_barrier_nums{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recovery 
Triggered","metric":"","query":"sum(rate(recovery_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) > bool 0 + sum(recovery_failure_cnt{job=~\"$job\",instance=~\"$node\"}) > bool 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Version","metric":"","query":"((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_checkpoint_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100) + ((storage_current_version_id{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_version_id{job=~\"$job\",instance=~\"$node\"}) >= bool 100)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Epoch","metric":"","query":"((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_min_pinned_epoch{job=~\"$job\",instance=~\"$node\"} == 0) + ((storage_max_committed_epoch{job=~\"$job\",instance=~\"$node\"} - 
storage_safe_epoch{job=~\"$job\",instance=~\"$node\"}) >= bool 6553600000 unless + storage_safe_epoch{job=~\"$job\",instance=~\"$node\"} == 0)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Compaction","metric":"","query":"sum(label_replace(storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}, 'L0', 'L0', 'level_index', '.*_L0') unless storage_level_sst_num{job=~\"$job\",instance=~\"$node\"}) by (L0) >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lagging Vacuum","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"} >= bool 200","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Meta Cache Memory","metric":"","query":"state_store_meta_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Block Cache 
Memory","metric":"","query":"state_store_block_cache_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 1.1","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Abnormal Uploading Memory Usage","metric":"","query":"state_store_uploading_memory_usage_ratio{job=~\"$job\",instance=~\"$node\"} >= bool 0.7","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Alerts","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Errors in the system group by type","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, 
executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compute error {{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"parse error {{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count{job=~\"$job\",instance=~\"$node\"}) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source error: source_id={{source_id}}, source_name={{source_name}} @ {{instance}}","metric":"","query":"source_status_is_up{job=~\"$job\",instance=~\"$node\"} == 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote storage error {{type}}: {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance, job, 
type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Errors","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Local mode","metric":"","query":"rate(frontend_query_counter_local_execution{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distributed 
mode","metric":"","query":"rate(distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Query QPS","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of each type of RisingWave components alive.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num{job=~\"$job\",instance=~\"$node\"}) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of active sessions in frontend 
nodes","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"frontend_active_sessions{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Active Sessions","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The CPU usage of each RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":16,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Number of CPU cores per RisingWave 
component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":17,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{instance}}","metric":"","query":"avg(process_cpu_core_num{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU Core 
Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"CPU","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":51},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The memory usage of each RisingWave component.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (instance) + sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage 
(Total)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"streaming actor - {{actor_id}}","metric":"","query":"rate(actor_memory_usage[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage meta cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_meta_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage block cache - {{job}} @ {{instance}}","metric":"","query":"sum(state_store_block_cache_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"storage write buffer - {{job}} @ {{instance}}","metric":"","query":"sum(uploading_memory_size{job=~\"$job\",instance=~\"$node\"}) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized_view {{materialized_view_id}}","metric":"","query":"sum(stream_memory_usage{job=~\"$job\",instance=~\"$node\"} * on(table_id, actor_id) group_left(materialized_view_id) table_info) by (materialized_view_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Memory Usage (Detailed)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Executor cache 
statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - cache miss - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Join - total lookups - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg - total lookups - table {{table_id}} actor 
{{actor_id}}","metric":"","query":"rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Group top n appendonly - total lookups - 
table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor - cache miss - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lookup executor - total lookups - table {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal join - cache miss - table_id {{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Temporal join - total lookups - table_id 
{{table_id}} actor {{actor_id}}","metric":"","query":"rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - cache hit count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Materialize - total cache count - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor 
Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"join executor cache miss ratio - - {{side}} side, join_table_id {{join_table_id}} degree_table_id {{degree_table_id}} actor {{actor_id}}","metric":"","query":"(sum(rate(stream_join_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, actor_id) ) / (sum(rate(stream_join_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (side, join_table_id, degree_table_id, 
actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_lookup_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_lookup_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Distinct agg cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_agg_distinct_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_agg_distinct_total_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream group top n appendonly cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_group_top_n_appendonly_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_group_top_n_appendonly_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream lookup cache miss ratio - table {{table_id}} actor {{actor_id}} 
","metric":"","query":"(sum(rate(stream_lookup_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_lookup_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Stream temporal join cache miss ratio - table {{table_id}} actor {{actor_id}} ","metric":"","query":"(sum(rate(stream_temporal_join_cache_miss_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_temporal_join_total_query_cache_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialize executor cache miss ratio - table {{table_id}} - actor {{actor_id}} {{instance}}","metric":"","query":"1 - (sum(rate(stream_materialize_cache_hit_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, actor_id) ) / (sum(rate(stream_materialize_cache_total_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (table_id, 
actor_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Cache Miss Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"memory cache - {{table_id}} @ {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, 
type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total_meta_miss_count - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts{type='meta_miss',job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage bloom filter statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter total - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_check_bloom_filter_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_positive_but_non_exist_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Bloom Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Storage file cache statistics","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op,
instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage File Cache","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":52},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Send/Recv throughput per node for streaming 
exchange","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":28,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Send @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_send_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Recv @ {{instance}}","metric":"","query":"sum(rate(stream_exchange_frag_recv_size{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Streaming Remote Exchange
(Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput per node","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":29,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{instance}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":30,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_exchange_recv_row_number{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Exchange Recv 
(Rows/s)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Network","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":53},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n 
","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":32,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current versions","metric":"","query":"storage_old_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current 
version","metric":"","query":"storage_current_version_object_size{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The storage size of each materialized view","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":33,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{metric}}, mv id - {{table_id}} ","metric":"","query":"storage_materialized_view_stats{metric='materialized_view_total_size',job=~\"$job\",instance=~\"$node\"}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"\n 
Objects are classified into 3 groups:\n - not referenced by versions: these object are being deleted from object store.\n - referenced by non-current versions: these objects are stale (not in the latest version), but those old versions may still be in use (e.g. long-running pinning). Thus those objects cannot be deleted at the moment.\n - referenced by current version: these objects are in the latest version.\n ","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":34,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"not referenced by versions","metric":"","query":"storage_stale_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by non-current 
versions","metric":"","query":"storage_old_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"referenced by current version","metric":"","query":"storage_current_version_object_count{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Object Total Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The number of bytes that have been written by compaction.Flush refers to the process of compacting Memtables to SSTables at Level 0.Compaction refers to the process of compacting SSTables at one level to another level.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":35,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$no
de\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Compaction - {{job}}","metric":"","query":"sum(storage_level_compact_write{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes{job=~\"$job\",instance=~\"$node\"}) by (job) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The remote storage read/write throughput","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":36,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by 
(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}}","metric":"","query":"sum(rate(object_store_read_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(rate(object_store_write_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Storage Remote I/O (Bytes/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Size statistics for checkpoint","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":37,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}}","metric":"","query":"sum by(le, job) (rate(state_store_sync_size_sum{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) / sum by(le, job) (rate(state_store_sync_size_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint 
Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":54},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":39,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_source_output_rows_counts{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of bytes read by each source per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":40,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(MB/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"The figure shows the number of rows written into each materialized executor actor per second.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":41,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) (group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"materialized view {{table_name}} table_id {{materialized_view_id}}","metric":"","query":"sum(rate(stream_executor_row_count{executor_identity=~\".*MaterializeExecutor.*\",job=~\"$job\",instance=~\"$node\"}[$__rate_interval]) * on(actor_id) group_left(materialized_view_id, table_name) 
(group(table_info{table_type=~\"MATERIALIZED_VIEW\",job=~\"$job\",instance=~\"$node\"}) by (actor_id, materialized_view_id, table_name))) by (materialized_view_id, table_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Materialized View Throughput(rows/s)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"Total number of rows that have been read from the backfill operator used by MV on MV","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":42,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Read Snapshot - table_id={{table_id}} actor={{actor_id}} @ 
{{instance}}","metric":"","query":"rate(stream_backfill_snapshot_read_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Upstream - table_id={{table_id}} actor={{actor_id}} @ {{instance}}","metric":"","query":"rate(stream_backfill_upstream_output_row_count{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Backfill Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"We first record the total blocking duration(ns) of output buffer of each actor. It shows how much time it takes an actor to process a message, i.e. a barrier, a watermark or rows of data, on average. 
Then we divide this duration by 1 second and show it as a percentage.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":43,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fragment {{fragment_id}}->{{downstream_fragment_id}}","metric":"","query":"avg(rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (fragment_id, downstream_fragment_id) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output Blocking Time Ratio 
(Backpressure)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":55},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":45,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution 
mode","metric":"","query":"distributed_running_query_num{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":46,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":47,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter{job=~\"$job\",instance=~\"$node\"}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":48,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, 
sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":49,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":51,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_type}} @ 
{{source_id}}","metric":"","query":"rate(connector_source_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Source Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":52,"interval":"1s","links":[],"maxDataPoints":1000,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sink={{connector_type}} @ {{sink_id}}","metric":"","query":"rate(connector_sink_rows_received{job=~\"$job\",instance=~\"$node\"}[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Connector Sink Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Connector 
Node","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, instance)","description":"Reporting instance of the metric","hide":0,"includeAll":true,"label":"Node","multi":true,"name":"node","options":[],"query":{"query":"label_values(process_cpu_seconds_total, instance)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"},{"current":{"selected":false,"text":"All","value":"__all"},"definition":"label_values(process_cpu_seconds_total, job)","description":"Reporting job of the metric","hide":0,"includeAll":true,"label":"Job","multi":true,"name":"job","options":[],"query":{"query":"label_values(process_cpu_seconds_total, job)","refId":"StandardVariableQuery"},"refresh":2,"regex":"","skipUrlSync":false,"sort":6,"type":"query"}]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Fcy3uV1nz","version":0} diff --git a/integration_tests/citus-cdc/create_source.sql b/integration_tests/citus-cdc/create_source.sql index 106f7cda8f457..9b69abed873ad 100644 --- a/integration_tests/citus-cdc/create_source.sql +++ b/integration_tests/citus-cdc/create_source.sql @@ -20,5 +20,30 @@ CREATE TABLE orders_rw ( schema.name = 'public', table.name = 'orders', slot.name = 'orders_dbz_slot', - publication.create.enable = 'true' ); + +DROP TABLE orders_rw; + +CREATE TABLE orders_rw ( + o_orderkey bigint, + o_custkey bigint, + o_orderstatus varchar, + o_totalprice decimal, + o_orderdate date, + o_orderpriority varchar, + o_clerk varchar, + o_shippriority bigint, + o_comment varchar, + PRIMARY KEY 
(o_orderkey) +) WITH ( + connector = 'citus-cdc', + hostname = 'citus-master', + port = '5432', + username = 'myuser', + password = '123456', + database.servers = 'citus-worker-1:5432,citus-worker-2:5432', + database.name = 'mydb', + schema.name = 'public', + table.name = 'orders', + slot.name = 'orders_dbz_slot' +); \ No newline at end of file diff --git a/integration_tests/clickhouse-sink/README.md b/integration_tests/clickhouse-sink/README.md index 607621faefeae..a383f3fba5ee4 100644 --- a/integration_tests/clickhouse-sink/README.md +++ b/integration_tests/clickhouse-sink/README.md @@ -23,6 +23,8 @@ docker compose exec clickhouse-server bash /opt/clickhouse/clickhouse-sql/run-sq - create_mv.sql - create_sink.sql +We only support `upsert` with clickhouse' `CollapsingMergeTree` and `VersionedCollapsingMergeTree` + 4. Execute a simple query: ```sh diff --git a/integration_tests/datagen/ad_click/ad_click.go b/integration_tests/datagen/ad_click/ad_click.go index 9ce71ae3f36bc..27928d3694e26 100644 --- a/integration_tests/datagen/ad_click/ad_click.go +++ b/integration_tests/datagen/ad_click/ad_click.go @@ -54,8 +54,8 @@ func (g *adClickGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { record := &clickEvent{ UserId: rand.Int63n(100000), AdId: rand.Int63n(10), - ClickTimestamp: now.Add(time.Duration(rand.Intn(1000)) * time.Millisecond).Format(gen.RwTimestampLayout), - ImpressionTimestamp: now.Format(gen.RwTimestampLayout), + ClickTimestamp: now.Add(time.Duration(rand.Intn(1000)) * time.Millisecond).Format(gen.RwTimestamptzLayout), + ImpressionTimestamp: now.Format(gen.RwTimestamptzLayout), } select { case <-ctx.Done(): diff --git a/integration_tests/datagen/ad_ctr/ad_ctr.go b/integration_tests/datagen/ad_ctr/ad_ctr.go index 1134ce4c1e895..cd3000e33407e 100644 --- a/integration_tests/datagen/ad_ctr/ad_ctr.go +++ b/integration_tests/datagen/ad_ctr/ad_ctr.go @@ -96,14 +96,14 @@ func (g *adCtrGen) generate() []sink.SinkRecord { &adImpressionEvent{ BidId: bidId, 
AdId: adId, - ImpressionTimestamp: time.Now().Format(gen.RwTimestampLayout), + ImpressionTimestamp: time.Now().Format(gen.RwTimestamptzLayout), }, } if g.hasClick(adId) { randomDelay := time.Duration(g.faker.IntRange(1, 10) * int(time.Second)) events = append(events, &adClickEvent{ BidId: bidId, - ClickTimestamp: time.Now().Add(randomDelay).Format(gen.RwTimestampLayout), + ClickTimestamp: time.Now().Add(randomDelay).Format(gen.RwTimestamptzLayout), }) } return events diff --git a/integration_tests/datagen/cdn_metrics/nics.go b/integration_tests/datagen/cdn_metrics/nics.go index 6aae95479ec9f..a95be61012115 100644 --- a/integration_tests/datagen/cdn_metrics/nics.go +++ b/integration_tests/datagen/cdn_metrics/nics.go @@ -109,7 +109,7 @@ func (impl *deviceNicsMonitor) newMetrics( MetricName: metricName, Aggregation: aggregation, NicName: "eth" + strconv.Itoa(NicId), - ReportTime: reportTime.Format(gen.RwTimestampLayout), + ReportTime: reportTime.Format(gen.RwTimestamptzLayout), Bandwidth: maxBandwidth, Value: float64(value), } diff --git a/integration_tests/datagen/cdn_metrics/tcp.go b/integration_tests/datagen/cdn_metrics/tcp.go index da7ce31d76dd3..f315a7572d4b6 100644 --- a/integration_tests/datagen/cdn_metrics/tcp.go +++ b/integration_tests/datagen/cdn_metrics/tcp.go @@ -90,7 +90,7 @@ func (m *deviceTcpMonitor) newMetrics(metricName string, reportTime time.Time, v return &tcpMetric{ DeviceId: m.deviceId, MetricName: metricName, - ReportTime: reportTime.Format(gen.RwTimestampLayout), + ReportTime: reportTime.Format(gen.RwTimestamptzLayout), Value: value, } } diff --git a/integration_tests/datagen/clickstream/clickstream.go b/integration_tests/datagen/clickstream/clickstream.go index c0e9350b3f2b1..201610a299283 100644 --- a/integration_tests/datagen/clickstream/clickstream.go +++ b/integration_tests/datagen/clickstream/clickstream.go @@ -138,7 +138,7 @@ func (g *clickStreamGen) generate() sink.SinkRecord { UserId: fmt.Sprint(userId), TargetId: string(target) + 
fmt.Sprint(targetId), TargetType: string(target), - EventTimestamp: time.Now().Format(gen.RwTimestampLayout), + EventTimestamp: time.Now().Format(gen.RwTimestamptzLayout), BehaviorType: behavior, ParentTargetType: parentTargetType, ParentTargetId: parentTargetId, diff --git a/integration_tests/datagen/delivery/delivery.go b/integration_tests/datagen/delivery/delivery.go index 0ca20dd689fea..d8e1133f71497 100644 --- a/integration_tests/datagen/delivery/delivery.go +++ b/integration_tests/datagen/delivery/delivery.go @@ -69,7 +69,7 @@ func (g *orderEventGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) OrderId: g.seqOrderId, RestaurantId: rand.Int63n(num_of_restaurants), OrderState: order_states[rand.Intn(len(order_states))], - OrderTimestamp: now.Add(time.Duration(rand.Intn(total_minutes)) * time.Minute).Format(gen.RwTimestampLayout), + OrderTimestamp: now.Add(time.Duration(rand.Intn(total_minutes)) * time.Minute).Format(gen.RwTimestampNaiveLayout), } g.seqOrderId++ select { diff --git a/integration_tests/datagen/ecommerce/ecommerce.go b/integration_tests/datagen/ecommerce/ecommerce.go index 18520c9b7eb60..34ee31cde6931 100644 --- a/integration_tests/datagen/ecommerce/ecommerce.go +++ b/integration_tests/datagen/ecommerce/ecommerce.go @@ -103,7 +103,7 @@ func (g *ecommerceGen) KafkaTopics() []string { } func (g *ecommerceGen) generate() []sink.SinkRecord { - ts := time.Now().Format(gen.RwTimestampLayout) + ts := time.Now().Format(gen.RwTimestampNaiveLayout) if g.faker.Bool() && g.seqShipId >= g.seqOrderId { // New order. 
diff --git a/integration_tests/datagen/gen/generator.go b/integration_tests/datagen/gen/generator.go index d519beec08c35..f84ffe3fcdea4 100644 --- a/integration_tests/datagen/gen/generator.go +++ b/integration_tests/datagen/gen/generator.go @@ -9,6 +9,7 @@ import ( "datagen/sink/postgres" "datagen/sink/pulsar" "datagen/sink/s3" + "time" "gonum.org/v1/gonum/stat/distuv" ) @@ -47,7 +48,8 @@ type LoadGenerator interface { Load(ctx context.Context, outCh chan<- sink.SinkRecord) } -const RwTimestampLayout = "2006-01-02 15:04:05.07+01:00" +const RwTimestampNaiveLayout = time.DateTime +const RwTimestamptzLayout = time.RFC3339 type RandDist interface { // Rand returns a random number ranging from [0, max]. diff --git a/integration_tests/datagen/go.mod b/integration_tests/datagen/go.mod index 9316a7df13a3b..1700fbf227123 100644 --- a/integration_tests/datagen/go.mod +++ b/integration_tests/datagen/go.mod @@ -67,12 +67,12 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/stretchr/testify v1.8.0 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect - golang.org/x/net v0.7.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/integration_tests/datagen/go.sum b/integration_tests/datagen/go.sum index 34ac14123e168..9cb52a458cb1f 100644 --- a/integration_tests/datagen/go.sum +++ b/integration_tests/datagen/go.sum @@ -446,8 +446,9 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -529,8 +530,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -617,12 +618,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -632,7 +633,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 
h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/integration_tests/datagen/twitter/twitter.go b/integration_tests/datagen/twitter/twitter.go index 06a235aaf7d02..1daf193c36e6f 100644 --- a/integration_tests/datagen/twitter/twitter.go +++ b/integration_tests/datagen/twitter/twitter.go @@ -120,7 +120,7 @@ func NewTwitterGen() gen.LoadGenerator { endTime, _ := time.Parse("2006-01-01", fmt.Sprintf("%d-01-01", endYear)) startTime, _ := time.Parse("2006-01-01", fmt.Sprintf("%d-01-01", startYear)) users[id] = &twitterUser{ - CreatedAt: faker.DateRange(startTime, endTime).Format(gen.RwTimestampLayout), + CreatedAt: faker.DateRange(startTime, endTime).Format(gen.RwTimestamptzLayout), Id: id, Name: fmt.Sprintf("%s %s", faker.Name(), faker.Adverb()), UserName: faker.Username(), @@ -152,7 +152,7 @@ func (t *twitterGen) generate() twitterEvent { return twitterEvent{ Data: tweetData{ Id: id, - CreatedAt: time.Now().Format(gen.RwTimestampLayout), + CreatedAt: time.Now().Format(gen.RwTimestamptzLayout), Text: sentence, Lang: gofakeit.Language(), }, diff --git a/integration_tests/doris-sink/README.md b/integration_tests/doris-sink/README.md new file mode 100644 index 0000000000000..add7db0a0aaa8 --- /dev/null +++ b/integration_tests/doris-sink/README.md @@ -0,0 +1,64 @@ +# Demo: Sinking to Doris + +In this demo, we want to showcase how RisingWave is able to sink data to Doris. + +1. 
Modify max_map_count + +```sh +sysctl -w vm.max_map_count=2000000 +``` + +If, after running these commands, Docker still encounters Doris startup errors, please refer to: https://doris.apache.org/docs/dev/install/construct-docker/run-docker-cluster + + +2. Launch the cluster: + +```sh +docker-compose up -d +``` + +The cluster contains a RisingWave cluster and its necessary dependencies, a datagen that generates the data, a Doris fe and be for sink. + +3. Create the Doris table via mysql: + +Login to mysql +```sh +docker compose exec fe mysql -uroot -P9030 -h127.0.0.1 +``` + +Run the following queries to create database and table. +```sql +CREATE database demo; +use demo; +CREATE table demo_bhv_table( + user_id int, + target_id text, + event_timestamp datetime +) UNIQUE KEY(`user_id`) +DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 +PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" +); +CREATE USER 'users'@'%' IDENTIFIED BY '123456'; +GRANT ALL ON *.* TO 'users'@'%'; +``` + +4. 
Execute the SQL queries in sequence: + +- append-only sql: + - append-only/create_source.sql + - append-only/create_mv.sql + - append-only/create_sink.sql + +- upsert sql: + - upsert/create_table.sql + - upsert/create_mv.sql + - upsert/create_sink.sql + - upsert/insert_update_delete.sql + +We only support `upsert` with doris' `UNIQUE KEY` + +Run the following query +```sql +select user_id, count(*) from demo.demo_bhv_table group by user_id; +``` diff --git a/integration_tests/doris-sink/append-only-sql/create_mv.sql b/integration_tests/doris-sink/append-only-sql/create_mv.sql new file mode 100644 index 0000000000000..0a803f8a2762d --- /dev/null +++ b/integration_tests/doris-sink/append-only-sql/create_mv.sql @@ -0,0 +1,7 @@ +CREATE MATERIALIZED VIEW bhv_mv AS +SELECT + user_id, + target_id, + event_timestamp +FROM + user_behaviors; \ No newline at end of file diff --git a/integration_tests/doris-sink/append-only-sql/create_sink.sql b/integration_tests/doris-sink/append-only-sql/create_sink.sql new file mode 100644 index 0000000000000..fa0cfddf7bf16 --- /dev/null +++ b/integration_tests/doris-sink/append-only-sql/create_sink.sql @@ -0,0 +1,12 @@ +CREATE SINK bhv_doris_sink +FROM + bhv_mv WITH ( + connector = 'doris', + type = 'append-only', + doris.url = 'http://fe:8030', + doris.user = 'users', + doris.password = '123456', + doris.database = 'demo', + doris.table='demo_bhv_table', + force_append_only='true' +); \ No newline at end of file diff --git a/integration_tests/doris-sink/append-only-sql/create_source.sql b/integration_tests/doris-sink/append-only-sql/create_source.sql new file mode 100644 index 0000000000000..c28c10f3616da --- /dev/null +++ b/integration_tests/doris-sink/append-only-sql/create_source.sql @@ -0,0 +1,18 @@ +CREATE table user_behaviors ( + user_id int, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMP, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id) +) WITH ( + 
connector = 'datagen', + fields.user_id.kind = 'sequence', + fields.user_id.start = '1', + fields.user_id.end = '1000', + fields.user_name.kind = 'random', + fields.user_name.length = '10', + datagen.rows.per.second = '10' +) FORMAT PLAIN ENCODE JSON; \ No newline at end of file diff --git a/integration_tests/doris-sink/docker-compose.yml b/integration_tests/doris-sink/docker-compose.yml new file mode 100644 index 0000000000000..697a6ac1880ea --- /dev/null +++ b/integration_tests/doris-sink/docker-compose.yml @@ -0,0 +1,104 @@ +--- +version: "3" +services: + fe: + image: apache/doris:2.0.0_alpha-fe-x86_64 + hostname: fe + environment: + - FE_SERVERS=fe1:172.21.0.2:9010 + - FE_ID=1 + ports: + - "8030:8030" + - "9030:9030" + networks: + mynetwork: + ipv4_address: 172.21.0.2 + be: + image: apache/doris:2.0.0_alpha-be-x86_64 + hostname: be + environment: + - FE_SERVERS=fe1:172.21.0.2:9010 + - BE_ADDR=172.21.0.3:9050 + depends_on: + - fe + ports: + - "9050:9050" + networks: + mynetwork: + ipv4_address: 172.21.0.3 + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + networks: + mynetwork: + ipv4_address: 172.21.0.4 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + networks: + mynetwork: + ipv4_address: 172.21.0.5 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + networks: + mynetwork: + ipv4_address: 172.21.0.6 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + networks: + mynetwork: + ipv4_address: 172.21.0.7 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + networks: + mynetwork: + ipv4_address: 172.21.0.8 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + networks: + mynetwork: + ipv4_address: 172.21.0.9 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + networks: + mynetwork: + ipv4_address: 
172.21.0.10 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + networks: + mynetwork: + ipv4_address: 172.21.0.11 +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose +networks: + mynetwork: + ipam: + config: + - subnet: 172.21.80.0/16 + default: \ No newline at end of file diff --git a/integration_tests/doris-sink/upsert/create_mv.sql b/integration_tests/doris-sink/upsert/create_mv.sql new file mode 100644 index 0000000000000..0a803f8a2762d --- /dev/null +++ b/integration_tests/doris-sink/upsert/create_mv.sql @@ -0,0 +1,7 @@ +CREATE MATERIALIZED VIEW bhv_mv AS +SELECT + user_id, + target_id, + event_timestamp +FROM + user_behaviors; \ No newline at end of file diff --git a/integration_tests/doris-sink/upsert/create_sink.sql b/integration_tests/doris-sink/upsert/create_sink.sql new file mode 100644 index 0000000000000..e7bd5445ba557 --- /dev/null +++ b/integration_tests/doris-sink/upsert/create_sink.sql @@ -0,0 +1,12 @@ +CREATE SINK bhv_doris_sink +FROM + bhv_mv WITH ( + connector = 'doris', + type = 'upsert', + doris.url = 'http://fe:8030', + doris.user = 'users', + doris.password = '123456', + doris.database = 'demo', + doris.table='demo_bhv_table', + primary_key = 'user_id' +); \ No newline at end of file diff --git a/integration_tests/doris-sink/upsert/create_table.sql b/integration_tests/doris-sink/upsert/create_table.sql new file mode 100644 index 0000000000000..6c98f88a0b510 --- /dev/null +++ b/integration_tests/doris-sink/upsert/create_table.sql @@ -0,0 +1,10 @@ +CREATE table user_behaviors ( + user_id int, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMP, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id) +); \ No newline at end of file diff --git 
a/integration_tests/doris-sink/upsert/insert_update_delete.sql b/integration_tests/doris-sink/upsert/insert_update_delete.sql new file mode 100644 index 0000000000000..73d5cda442258 --- /dev/null +++ b/integration_tests/doris-sink/upsert/insert_update_delete.sql @@ -0,0 +1,8 @@ +INSERT INTO user_behaviors VALUES(1,'1','1','2020-01-01 01:01:01','1','1','1'), +(2,'2','2','2020-01-01 01:01:02','2','2','2'), +(3,'3','3','2020-01-01 01:01:03','3','3','3'), +(4,'4','4','2020-01-01 01:01:04','4','4','4'); + +DELETE FROM user_behaviors WHERE user_id = 2; + +UPDATE user_behaviors SET target_id = 30 WHERE user_id = 3; \ No newline at end of file diff --git a/integration_tests/elasticsearch-sink/README.md b/integration_tests/elasticsearch-sink/README.md new file mode 100644 index 0000000000000..b114e8132024a --- /dev/null +++ b/integration_tests/elasticsearch-sink/README.md @@ -0,0 +1,41 @@ +# Demo: Sinking to ElasticSearch + +In this demo, we want to showcase how RisingWave is able to sink data to ElasticSearch. + +1. Set the compose profile accordingly: +Demo with elasticsearch 7: +``` +export COMPOSE_PROFILES=es7 +``` + +Demo with elasticsearch 8 +``` +export COMPOSE_PROFILES=es8 +``` + +2. Launch the cluster: + +```sh +docker-compose up -d +``` + +The cluster contains a RisingWave cluster and its necessary dependencies, a datagen that generates the data, a single-node elasticsearch for sink. + +3. Execute the SQL queries in sequence: + +- create_source.sql +- create_mv.sql +- create_es[7/8]_sink.sql + +4. 
Check the contents in ES: + +```sh +# Check the document counts +curl -XGET -u elastic:risingwave "http://localhost:9200/test/_count" -H 'Content-Type: application/json' + +# Check the content of a document by user_id +curl -XGET -u elastic:risingwave "http://localhost:9200/test/_search" -H 'Content-Type: application/json' -d '{"query":{"term": {"user_id":2}}' | jq + +# Get the first 10 documents sort by user_id +curl -XGET -u elastic:risingwave "http://localhost:9200/test/_search?size=10" -H 'Content-Type: application/json' -d'{"query":{"match_all":{}}, "sort": ["user_id"]}' | jq +``` \ No newline at end of file diff --git a/integration_tests/elasticsearch-sink/create_es7_sink.sql b/integration_tests/elasticsearch-sink/create_es7_sink.sql new file mode 100644 index 0000000000000..997c238b90344 --- /dev/null +++ b/integration_tests/elasticsearch-sink/create_es7_sink.sql @@ -0,0 +1,9 @@ +CREATE SINK bhv_es_sink +FROM + bhv_mv WITH ( + connector = 'elasticsearch', + index = 'test', + url = 'http://elasticsearch8:9200', + username = 'elastic', + password = 'risingwave' +); \ No newline at end of file diff --git a/integration_tests/elasticsearch-sink/create_es8_sink.sql b/integration_tests/elasticsearch-sink/create_es8_sink.sql new file mode 100644 index 0000000000000..997c238b90344 --- /dev/null +++ b/integration_tests/elasticsearch-sink/create_es8_sink.sql @@ -0,0 +1,9 @@ +CREATE SINK bhv_es_sink +FROM + bhv_mv WITH ( + connector = 'elasticsearch', + index = 'test', + url = 'http://elasticsearch8:9200', + username = 'elastic', + password = 'risingwave' +); \ No newline at end of file diff --git a/integration_tests/elasticsearch-sink/create_mv.sql b/integration_tests/elasticsearch-sink/create_mv.sql new file mode 100644 index 0000000000000..0a803f8a2762d --- /dev/null +++ b/integration_tests/elasticsearch-sink/create_mv.sql @@ -0,0 +1,7 @@ +CREATE MATERIALIZED VIEW bhv_mv AS +SELECT + user_id, + target_id, + event_timestamp +FROM + user_behaviors; \ No newline at end 
of file diff --git a/integration_tests/elasticsearch-sink/create_source.sql b/integration_tests/elasticsearch-sink/create_source.sql new file mode 100644 index 0000000000000..c28c10f3616da --- /dev/null +++ b/integration_tests/elasticsearch-sink/create_source.sql @@ -0,0 +1,18 @@ +CREATE table user_behaviors ( + user_id int, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMP, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id) +) WITH ( + connector = 'datagen', + fields.user_id.kind = 'sequence', + fields.user_id.start = '1', + fields.user_id.end = '1000', + fields.user_name.kind = 'random', + fields.user_name.length = '10', + datagen.rows.per.second = '10' +) FORMAT PLAIN ENCODE JSON; \ No newline at end of file diff --git a/integration_tests/elasticsearch-sink/docker-compose.yml b/integration_tests/elasticsearch-sink/docker-compose.yml new file mode 100644 index 0000000000000..47d314d1f57e2 --- /dev/null +++ b/integration_tests/elasticsearch-sink/docker-compose.yml @@ -0,0 +1,73 @@ +--- +version: "3" +services: + elasticsearch7: + image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0 + environment: + - xpack.security.enabled=true + - discovery.type=single-node + - ELASTIC_PASSWORD=risingwave + ports: + - 9200:9200 + profiles: + - es7 + elasticsearch8: + image: docker.elastic.co/elasticsearch/elasticsearch:8.10.0 + environment: + - xpack.security.enabled=true + - discovery.type=single-node + - ELASTIC_PASSWORD=risingwave + ports: + - 9200:9200 + profiles: + - es8 + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + 
service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/feature-store/Dockerfile b/integration_tests/feature-store/Dockerfile index dc0d02bd79682..7aadc5fb15d72 100644 --- a/integration_tests/feature-store/Dockerfile +++ b/integration_tests/feature-store/Dockerfile @@ -1,5 +1,6 @@ -FROM rust:1.67 as feature-store-server +FROM ubuntu:20.04 AS feature-store-server ARG BUILD_ARG +ENV DEBIAN_FRONTEND=noninteractive USER root @@ -9,8 +10,11 @@ RUN mkdir -p $WORK_DIR WORKDIR $WORK_DIR RUN apt update -RUN apt install -y python3 python3-pip wget ca-certificates -RUN apt install -y postgresql-client +RUN apt install -y lsof curl openssl libssl-dev pkg-config build-essential \ + cmake \ + python3 python3-pip wget ca-certificates \ + postgresql-client \ + protobuf-compiler ADD ./server/model/requirements.txt $WORK_DIR/model-pipreqs.txt ADD ./generator/requirements.txt $WORK_DIR/generator-pipreqs.txt @@ -19,8 +23,8 @@ RUN pip3 install -r $WORK_DIR/model-pipreqs.txt RUN pip3 install -r $WORK_DIR/generator-pipreqs.txt RUN pip3 install risingwave -RUN apt install -y lsof curl openssl libssl-dev pkg-config build-essential -RUN apt install -y cmake librdkafka-dev +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path -y +ENV PATH /root/.cargo/bin/:$PATH # Install .NET 6.0 RUN wget https://packages.microsoft.com/config/debian/11/packages-microsoft-prod.deb -O 
packages-microsoft-prod.deb @@ -49,11 +53,11 @@ ADD ./run.sh $WORK_DIR/run-sh/ ADD ./run-mfa.sh $WORK_DIR/run-sh/ RUN if [ "$BUILD_ARG" = "mfa" ]; then \ - cp $WORK_DIR/run-sh/run-mfa.sh $WORK_DIR/run.sh;\ + cp $WORK_DIR/run-sh/run-mfa.sh $WORK_DIR/run.sh;\ else \ - cp $WORK_DIR/run-sh/run.sh $WORK_DIR/run.sh;\ + cp $WORK_DIR/run-sh/run.sh $WORK_DIR/run.sh;\ fi RUN chmod +x $WORK_DIR/run.sh && rm -rf $WORK_DIR/run-sh -CMD ["sh", "-c", "sleep 10 && ./run.sh"] \ No newline at end of file +CMD ["sh", "-c", "sleep 10 && ./run.sh"] diff --git a/integration_tests/feature-store/README.md b/integration_tests/feature-store/README.md index 425efadf27c68..f74c679e73ffb 100644 --- a/integration_tests/feature-store/README.md +++ b/integration_tests/feature-store/README.md @@ -25,7 +25,9 @@ When a user needs to make a prediction using these features, they can provide th 1. Build docker. Kafka RisingWave and Feature Store. -```docker compose up --build``` +``` +docker compose up --build +``` The Feature Store system performs several tasks in sequence: @@ -40,7 +42,9 @@ The Feature Store system performs several tasks in sequence: 2. Then we can get the simulation results for Feature store in `.log`. -```cat .log/simulator_log``` +``` +cat .log/simulator_log +``` # Account change feature store #### Case Description @@ -53,8 +57,12 @@ In this case, we need to calculate the frequency and count of user account chang 1. Build docker. Kafka RisingWave and Feature Store. -```docker compose build --build-arg BUILD_ARG=mfa``` +``` +docker compose build --build-arg BUILD_ARG=mfa +``` 2. Then we can get the simulation results for Feature store in `.log`. 
-```cat .log/simulator_log``` \ No newline at end of file +``` +cat .log/simulator_log +``` \ No newline at end of file diff --git a/integration_tests/feature-store/docker-compose.yml b/integration_tests/feature-store/docker-compose.yml index d212a80369a38..caa950ea87ccd 100644 --- a/integration_tests/feature-store/docker-compose.yml +++ b/integration_tests/feature-store/docker-compose.yml @@ -66,7 +66,6 @@ services: - "./mfa-start.sql:/mfa-start.sql" - "./mfa-mock.sql:/mfa-mock.sql" feature-store: - image: rust:1.67 build: context: . target: feature-store-server diff --git a/integration_tests/feature-store/server/Cargo.lock b/integration_tests/feature-store/server/Cargo.lock index a678176645772..452bbfe3db14d 100644 --- a/integration_tests/feature-store/server/Cargo.lock +++ b/integration_tests/feature-store/server/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -19,10 +19,11 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ + "cfg-if", "getrandom", "once_cell", "version_check", @@ -30,27 +31,72 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ 
"memchr", ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] +name = "anstream" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", ] [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "async-stream" @@ -71,40 +117,29 @@ checksum = 
"16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "atoi" -version = "0.4.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616896e05fc0e2649463a93a15183c6a16bf03413a7af88ef1285ddedfa9cda5" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" dependencies = [ "num-traits", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -113,9 +148,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", @@ -131,20 +166,19 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", + "rustversion", "serde", "sync_wrapper", - "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = 
"759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -152,15 +186,16 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -173,15 +208,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.1" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] -name = "base64" -version = "0.21.2" +name = "base64ct" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bitflags" @@ -191,9 +226,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +dependencies = [ + "serde", +] [[package]] name = "block-buffer" @@ -206,27 +244,27 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = 
"7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cc" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] @@ -239,19 +277,31 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "2.34.0" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", "strsim", - "textwrap", - "unicode-width", - "vec_map", ] +[[package]] +name = "clap_lex" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" + [[package]] name = "cmake" version = "0.1.50" @@ 
-261,6 +311,18 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "const-oid" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + [[package]] name = "core-foundation" version = "0.9.3" @@ -288,18 +350,18 @@ dependencies = [ [[package]] name = "crc" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "1.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crossbeam-queue" @@ -330,6 +392,17 @@ dependencies = [ "typenum", ] +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + [[package]] name = "digest" version = "0.10.7" @@ -337,41 +410,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] [[package]] -name = "dirs" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" -dependencies = [ - 
"dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "dotenv" -version = "0.15.0" +name = "dotenvy" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] [[package]] name = "encoding_rs" @@ -390,23 +447,23 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.2" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "etcetera" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cc", - "libc", + "cfg-if", + "home", + "windows-sys", ] [[package]] @@ -423,9 +480,15 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "finl_unicode" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" [[package]] name = "fixedbitset" @@ -433,6 +496,17 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -463,21 +537,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "futures" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - [[package]] name = "futures-channel" version = "0.3.28" @@ -507,13 +566,13 @@ dependencies = [ [[package]] name = "futures-intrusive" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot 0.11.2", + "parking_lot", ] [[package]] @@ -530,7 +589,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -551,7 +610,6 @@ version = 
"0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ - "futures-channel", "futures-core", "futures-io", "futures-macro", @@ -586,15 +644,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -609,15 +667,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -629,14 +678,18 @@ name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash", + "allocator-api2", +] [[package]] name = "hashlink" -version = "0.7.0" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.14.0", ] [[package]] @@ -650,18 +703,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -687,6 +731,15 @@ dependencies = [ "digest", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys", +] + [[package]] name = "http" version = "0.2.9" @@ -709,12 +762,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" version = "1.8.0" @@ -723,9 +770,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" @@ -806,15 +853,6 @@ dependencies = [ "hashbrown 0.14.0", ] -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - [[package]] name = "ipnet" version = "2.8.0" @@ -823,9 +861,9 @@ checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itertools" -version = "0.10.5" +version = "0.11.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] @@ -850,12 +888,32 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "libc" -version = "0.2.147" +version = "0.2.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libsqlite3-sys" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] [[package]] name = "libz-sys" @@ -871,9 +929,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -887,15 +945,15 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "matchit" -version = 
"0.5.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" @@ -908,9 +966,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "mime" @@ -978,13 +1036,52 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -993,7 +1090,7 @@ version = "1.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", ] @@ -1020,9 +1117,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] @@ -1035,11 +1132,11 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.56" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" +checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -1056,7 +1153,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1067,9 +1164,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.91" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" +checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ "cc", "libc", @@ -1077,17 +1174,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - 
[[package]] name = "parking_lot" version = "0.12.1" @@ -1095,21 +1181,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -1120,7 +1192,7 @@ checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall", "smallvec", "windows-targets", ] @@ -1131,6 +1203,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.0" @@ -1139,12 +1220,12 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "petgraph" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 1.9.3", + "indexmap 2.0.0", ] [[package]] @@ -1182,14 +1263,14 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = 
"pin-project-lite" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -1197,6 +1278,27 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.27" @@ -1205,11 +1307,11 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "postgres-protocol" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.2", + "base64", "byteorder", "bytes", "fallible-iterator", @@ -1223,9 +1325,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" dependencies = [ "bytes", "fallible-iterator", @@ -1240,12 +1342,12 @@ checksum = 
"5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.1.25" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] @@ -1260,18 +1362,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.10.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" dependencies = [ "bytes", "prost-derive", @@ -1279,54 +1381,53 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.10.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", - "cfg-if", - "cmake", "heck", "itertools", - "lazy_static", "log", "multimap", + "once_cell", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn 2.0.38", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.10.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +checksum = 
"265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] name = "prost-types" -version = "0.10.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" dependencies = [ - "bytes", "prost", ] [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -1363,11 +1464,12 @@ dependencies = [ [[package]] name = "rdkafka" -version = "0.25.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8acd8f5c5482fdf89e8878227bafa442d8c4409f6287391c85549ca83626c27" +checksum = "053adfa02fab06e86c01d586cc68aa47ee0ff4489a59469081dc12cbcde578bf" dependencies = [ - "futures", + "futures-channel", + "futures-util", "libc", "log", "rdkafka-sys", @@ -1380,25 +1482,17 @@ dependencies = [ [[package]] name = "rdkafka-sys" -version = "3.0.0+1.6.0" +version = "4.6.0+2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca35e95c88e08cdc643b25744e38ccee7c93c7e90d1ac6850fe74cbaa40803c3" +checksum = "ad63c279fca41a27c231c450a2d2ad18288032e9cbb159ad16c9d96eba35aaaf" dependencies = [ + "cmake", "libc", "libz-sys", "num_enum", "pkg-config", ] -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.3.5" @@ -1408,22 +1502,11 
@@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom", - "redox_syscall 0.2.16", - "thiserror", -] - [[package]] name = "regex" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", @@ -1433,9 +1516,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick", "memchr", @@ -1444,9 +1527,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" [[package]] name = "reqwest" @@ -1454,7 +1537,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.2", + "base64", "bytes", "encoding_rs", "futures-core", @@ -1485,6 +1568,28 @@ dependencies = [ "winreg", ] +[[package]] +name = "rsa" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" +dependencies = [ + "byteorder", + "const-oid", + "digest", + "num-bigint-dig", + 
"num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1493,17 +1598,23 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustix" -version = "0.38.7" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "172891ebdceb05aa0005f533a6cbfca599ddd7d966f6f5d4d9b2e70478e70399" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", "windows-sys", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -1550,29 +1661,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.183" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ 
"itoa", "ryu", @@ -1608,10 +1719,10 @@ dependencies = [ ] [[package]] -name = "sha-1" -version = "0.10.1" +name = "sha1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -1638,26 +1749,36 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest", + "rand_core", +] + [[package]] name = "siphasher" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" @@ -1671,19 +1792,44 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", 
"windows-sys", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "sqlformat" -version = "0.1.8" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b7922be017ee70900be125523f38bdd644f4f06a1b16e8fa5a8ee8c34bffd4" +checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" dependencies = [ "itertools", "nom", @@ -1692,109 +1838,216 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.5.13" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551873805652ba0d912fec5bbb0f8b4cdd96baf8e2ebf5970e5671092966019b" +checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33" dependencies = [ "sqlx-core", "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", ] [[package]] name = "sqlx-core" -version = "0.5.13" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48c61941ccf5ddcada342cd59e3e5173b007c509e1e8e990dafc830294d9dc5" +checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d" dependencies = [ "ahash", "atoi", - "base64 0.13.1", - "bitflags 1.3.2", "byteorder", "bytes", "crc", "crossbeam-queue", - "dirs", + "dotenvy", "either", "event-listener", "futures-channel", "futures-core", "futures-intrusive", + "futures-io", "futures-util", 
"hashlink", "hex", - "hkdf", - "hmac", - "indexmap 1.9.3", - "itoa", - "libc", + "indexmap 2.0.0", "log", - "md-5", "memchr", + "native-tls", "once_cell", "paste", "percent-encoding", - "rand", "serde", "serde_json", - "sha-1", "sha2", "smallvec", "sqlformat", - "sqlx-rt", - "stringprep", "thiserror", + "tokio", "tokio-stream", + "tracing", "url", - "whoami", ] [[package]] name = "sqlx-macros" -version = "0.5.13" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0fba2b0cae21fc00fe6046f8baa4c7fcb49e379f0f592b04696607f69ed2e1" +checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc" dependencies = [ - "dotenv", + "dotenvy", "either", "heck", + "hex", "once_cell", "proc-macro2", "quote", + "serde", + "serde_json", "sha2", "sqlx-core", - "sqlx-rt", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", "syn 1.0.109", + "tempfile", + "tokio", "url", ] [[package]] -name = "sqlx-rt" -version = "0.5.13" +name = "sqlx-mysql" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" +checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db" dependencies = [ - "native-tls", + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "bytes", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", "once_cell", - "tokio", - "tokio-native-tls", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + 
"sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624" +dependencies = [ + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", ] [[package]] name = "stringprep" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" dependencies = [ + "finl_unicode", "unicode-bidi", "unicode-normalization", ] [[package]] name = "strsim" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" @@ -1815,9 +2068,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -1832,44 +2085,35 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall", "rustix", "windows-sys", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1889,20 +2133,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = 
"4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ - "autocfg", "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.4", "tokio-macros", "windows-sys", ] @@ -1925,7 +2168,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1940,9 +2183,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" dependencies = [ "async-trait", "byteorder", @@ -1951,15 +2194,17 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.1", + "parking_lot", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", - "socket2 0.5.3", + "rand", + "socket2 0.5.4", "tokio", "tokio-util", + "whoami", ] [[package]] @@ -1975,9 +2220,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -1995,9 +2240,9 @@ checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.0.0", "toml_datetime", 
@@ -2006,17 +2251,15 @@ dependencies = [ [[package]] name = "tonic" -version = "0.7.2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" +checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.13.1", + "base64", "bytes", - "futures-core", - "futures-util", "h2", "http", "http-body", @@ -2025,28 +2268,25 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "prost-derive", "tokio", "tokio-stream", - "tokio-util", "tower", "tower-layer", "tower-service", "tracing", - "tracing-futures", ] [[package]] name = "tonic-build" -version = "0.7.2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" +checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] @@ -2069,25 +2309,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags 1.3.2", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" @@ -2102,11 +2323,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = 
"ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" dependencies = [ - "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -2115,34 +2335,24 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "try-lock" version = "0.2.4" @@ -2151,9 +2361,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" @@ -2163,9 +2373,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = 
"3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -2182,12 +2392,6 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - [[package]] name = "unicode_categories" version = "0.1.1" @@ -2196,9 +2400,9 @@ checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", @@ -2206,16 +2410,16 @@ dependencies = [ ] [[package]] -name = "vcpkg" -version = "0.2.15" +name = "utf8parse" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] -name = "vec_map" -version = "0.8.2" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" @@ -2259,7 +2463,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -2293,7 +2497,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", 
"wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2316,13 +2520,14 @@ dependencies = [ [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix", ] [[package]] @@ -2368,9 +2573,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -2383,51 +2588,51 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.4" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acaaa1190073b2b101e15083c38ee8ec891b5e05cbee516521e94ec008f61e64" +checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" dependencies = [ "memchr", ] @@ -2440,3 +2645,9 @@ checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] + +[[package]] +name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" diff --git a/integration_tests/feature-store/server/Cargo.toml b/integration_tests/feature-store/server/Cargo.toml index 123f089f5e7a3..ce43a3f8af506 100644 --- a/integration_tests/feature-store/server/Cargo.toml +++ b/integration_tests/feature-store/server/Cargo.toml @@ -9,23 +9,20 @@ edition 
= "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sqlx = { version = "0.5", features = [ "runtime-tokio-native-tls" , "postgres" ] } +sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "postgres"] } tokio = { version = "1", features = ["full"] } -tonic = "0.7.1" +tonic = "0.10.2" reqwest = { version = "0.11", features = ["blocking"] } -rdkafka = { version = "0.25", features = ["dynamic-linking"] } +rdkafka = { version = "0.34", features = ["cmake-build"] } serde_json = "1.0" -prost = "0.10" -clap = "2.26.0" -tokio-postgres = "0.7.8" -tonic-build = "0.7.1" +prost = "0.12" +clap = "4.4.6" +tokio-postgres = "0.7.10" +tonic-build = "0.10.2" [build-dependencies] -tonic-build = "0.7.1" +tonic-build = "0.10.2" [[bin]] name = "server" path = "src/main.rs" - -[lints] -workspace = true diff --git a/integration_tests/feature-store/server/model/requirements.txt b/integration_tests/feature-store/server/model/requirements.txt index bc40c0ead3fbe..276361b0e1f83 100644 --- a/integration_tests/feature-store/server/model/requirements.txt +++ b/integration_tests/feature-store/server/model/requirements.txt @@ -1,6 +1,6 @@ -grpcio==1.48.0 -numpy==1.21.4 -protobuf==4.21.5 +grpcio==1.53.0 +numpy==1.24 +protobuf==4.21.6 psycopg==3.0.16 scikit-learn==1.3.0 -pandas==1.4.0 +pandas==2.0 diff --git a/integration_tests/feature-store/server/src/feature_store.rs b/integration_tests/feature-store/server/src/feature_store.rs index 883a7c332767d..30bb92debe5aa 100644 --- a/integration_tests/feature-store/server/src/feature_store.rs +++ b/integration_tests/feature-store/server/src/feature_store.rs @@ -57,7 +57,13 @@ impl Server for FeatureStoreServer { request: tonic::Request, ) -> Result, tonic::Status> { let do_location_id = request.into_inner(); - let fare_amount = self.get_taxi_amount(do_location_id.do_location_id.clone(),do_location_id.pu_location_id.clone()).await.unwrap(); + let fare_amount = self + 
.get_taxi_amount( + do_location_id.do_location_id.clone(), + do_location_id.pu_location_id.clone(), + ) + .await + .unwrap(); Ok(Response::new(GetTaxiAmountResponse { fare_amount: fare_amount as f64, })) diff --git a/integration_tests/feature-store/server/src/main.rs b/integration_tests/feature-store/server/src/main.rs index adab174aaa227..73fad5122c40f 100644 --- a/integration_tests/feature-store/server/src/main.rs +++ b/integration_tests/feature-store/server/src/main.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgMatches, Command}; use crate::feature_store::FeatureStoreServer; use crate::kafka::KafkaSink; @@ -17,14 +17,14 @@ async fn main() { println!("Reading args"); let args = get_args(); let kafka_sink = KafkaSink::new( - args.value_of("brokers") + args.get_one::("brokers") .expect("failed to decode brokers") .to_string(), - args.value_of("output-topic") + args.get_one::("output-topic") .expect("failed to decode output_topics") .to_string(), ); - println!("Testing Kafka payload,args{:?}",args); + println!("Testing Kafka payload,args{:?}", args); tokio::spawn(KafkaSink::mock_consume()); kafka_sink .send("0".to_string(), "{init: true}".to_string()) @@ -41,23 +41,23 @@ async fn main() { .unwrap() } -fn get_args<'a>() -> ArgMatches<'a> { - App::new("feature-store") +fn get_args() -> ArgMatches { + Command::new("feature-store") .about("Feature store") .arg( - Arg::with_name("brokers") - .short("b") + Arg::new("brokers") + .short('b') .long("brokers") .help("Kafka broker list") - .takes_value(true) + .num_args(1) .default_value("kafka:9092"), ) .arg( - Arg::with_name("output-topic") + Arg::new("output-topic") .long("output-topics") .help("Output topics names") .default_value("taxi") - .takes_value(true), + .num_args(1), ) .get_matches() } diff --git a/integration_tests/feature-store/server/src/model.rs b/integration_tests/feature-store/server/src/model.rs index 2b141fafce0d1..4e7ae9464ca4e 
100644 --- a/integration_tests/feature-store/server/src/model.rs +++ b/integration_tests/feature-store/server/src/model.rs @@ -1,24 +1,27 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct TrainingRequest { -} +pub struct TrainingRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct TrainingResponse { -} +pub struct TrainingResponse {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAmountRequest { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub do_location_id: i64, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub pu_location_id: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAmountResponse { - #[prost(float, tag="1")] + #[prost(float, tag = "1")] pub amount: f32, } /// Generated client implementations. pub mod model_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; use tonic::codegen::*; #[derive(Debug, Clone)] pub struct ModelClient { @@ -28,7 +31,7 @@ pub mod model_client { /// Attempt to create a new client by connecting to a given endpoint. 
pub async fn connect(dst: D) -> Result where - D: std::convert::TryInto, + D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; @@ -46,6 +49,12 @@ pub mod model_client { let inner = tonic::client::Grpc::new(inner); Self { inner } } + + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( inner: T, interceptor: F, @@ -59,60 +68,81 @@ pub mod model_client { >::ResponseBody, >, >, - , - >>::Error: Into + Send + Sync, + >>::Error: + Into + Send + Sync, { ModelClient::new(InterceptedService::new(inner, interceptor)) } - /// Compress requests with `gzip`. + + /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); self } - /// Enable decompressing responses with `gzip`. + + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` #[must_use] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); self } + + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get_amount( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/model.Model/GetAmount"); - self.inner.unary(request.into_request(), path, codec).await + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("model.Model", "GetAmount")); + self.inner.unary(req, path, codec).await } + pub async fn training( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/model.Model/Training"); - self.inner.unary(request.into_request(), path, codec).await + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("model.Model", "Training")); + self.inner.unary(req, path, codec).await } } } @@ -120,46 +150,81 @@ pub mod model_client { pub mod model_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - ///Generated trait containing 
gRPC methods that should be implemented for use with ModelServer. + /// Generated trait containing gRPC methods that should be implemented for use with ModelServer. #[async_trait] pub trait Model: Send + Sync + 'static { async fn get_amount( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn training( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] pub struct ModelServer { inner: _Inner, - accept_compression_encodings: (), - send_compression_encodings: (), + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, } struct _Inner(Arc); impl ModelServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } + pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } + + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } } impl tonic::codegen::Service> for ModelServer where @@ -167,39 +232,41 @@ pub mod model_server { B: Body + Send + 'static, B::Error: Into + Send + 'static, { - type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; + type Response = http::Response; + fn poll_ready( &mut self, _cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { Poll::Ready(Ok(())) } + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/model.Model/GetAmount" => { #[allow(non_camel_case_types)] struct GetAmountSvc(pub Arc); - impl tonic::server::UnaryService - for GetAmountSvc { + impl tonic::server::UnaryService for GetAmountSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::GetAmountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).get_amount(request).await }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::get_amount(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -209,6 +276,10 @@ pub mod model_server { .apply_compression_config( accept_compression_encodings, 
send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -218,24 +289,23 @@ pub mod model_server { "/model.Model/Training" => { #[allow(non_camel_case_types)] struct TrainingSvc(pub Arc); - impl tonic::server::UnaryService - for TrainingSvc { + impl tonic::server::UnaryService for TrainingSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::TrainingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).training(request).await }; + let inner = Arc::clone(&self.0); + let fut = async move { ::training(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -245,24 +315,24 @@ pub mod model_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), } } } @@ -273,12 +343,14 @@ pub mod model_server { inner, accept_compression_encodings: 
self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { @@ -286,7 +358,7 @@ pub mod model_server { write!(f, "{:?}", self.0) } } - impl tonic::transport::NamedService for ModelServer { + impl tonic::server::NamedService for ModelServer { const NAME: &'static str = "model.Model"; } } diff --git a/integration_tests/feature-store/server/src/server_pb.rs b/integration_tests/feature-store/server/src/server_pb.rs index 1f2912d868412..e4e25439fb29d 100644 --- a/integration_tests/feature-store/server/src/server_pb.rs +++ b/integration_tests/feature-store/server/src/server_pb.rs @@ -1,96 +1,104 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportActionRequest { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub userid: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub eventtype: ::prost::alloc::string::String, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub changenum: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportActionResponse { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub timestamp: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetFeatureRequest { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub userid: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetFeatureResponse { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub count: u64, - #[prost(int64, tag="2")] + #[prost(int64, tag 
= "2")] pub sum: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportTaxiActionRequest { - #[prost(int32, tag="1")] + #[prost(int32, tag = "1")] pub vendor_id: i32, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub lpep_pickup_datetime: ::prost::alloc::string::String, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub lpep_dropoff_datetime: ::prost::alloc::string::String, - #[prost(bool, tag="4")] + #[prost(bool, tag = "4")] pub store_and_fwd_flag: bool, - #[prost(double, tag="5")] + #[prost(double, tag = "5")] pub ratecode_id: f64, - #[prost(int64, tag="6")] + #[prost(int64, tag = "6")] pub pu_location_id: i64, - #[prost(int64, tag="7")] + #[prost(int64, tag = "7")] pub do_location_id: i64, - #[prost(double, tag="8")] + #[prost(double, tag = "8")] pub passenger_count: f64, - #[prost(double, tag="9")] + #[prost(double, tag = "9")] pub trip_distance: f64, - #[prost(double, tag="10")] + #[prost(double, tag = "10")] pub fare_amount: f64, - #[prost(double, tag="11")] + #[prost(double, tag = "11")] pub extra: f64, - #[prost(double, tag="12")] + #[prost(double, tag = "12")] pub mta_tax: f64, - #[prost(double, tag="13")] + #[prost(double, tag = "13")] pub tip_amount: f64, - #[prost(double, tag="14")] + #[prost(double, tag = "14")] pub tolls_amount: f64, - #[prost(double, tag="15")] + #[prost(double, tag = "15")] pub ehail_fee: f64, - #[prost(double, tag="16")] + #[prost(double, tag = "16")] pub improvement_surcharge: f64, - #[prost(double, tag="17")] + #[prost(double, tag = "17")] pub total_amount: f64, - #[prost(double, tag="18")] + #[prost(double, tag = "18")] pub payment_type: f64, - #[prost(double, tag="19")] + #[prost(double, tag = "19")] pub trip_type: f64, - #[prost(double, tag="20")] + #[prost(double, tag = "20")] pub congestion_surcharge: f64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct ReportTaxiActionResponse { 
-} +pub struct ReportTaxiActionResponse {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetTaxiAmountRequest { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub do_location_id: i64, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub pu_location_id: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetTaxiAmountResponse { - #[prost(double, tag="1")] + #[prost(double, tag = "1")] pub fare_amount: f64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartTrainingRequest { -} +pub struct StartTrainingRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartTrainingResponse { -} +pub struct StartTrainingResponse {} /// Generated client implementations. pub mod server_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; use tonic::codegen::*; #[derive(Debug, Clone)] pub struct ServerClient { @@ -100,7 +108,7 @@ pub mod server_client { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where - D: std::convert::TryInto, + D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; @@ -118,6 +126,12 @@ pub mod server_client { let inner = tonic::client::Grpc::new(inner); Self { inner } } + + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( inner: T, interceptor: F, @@ -131,121 +145,140 @@ pub mod server_client { >::ResponseBody, >, >, - , - >>::Error: Into + Send + Sync, + >>::Error: + Into + Send + Sync, { ServerClient::new(InterceptedService::new(inner, interceptor)) } - /// Compress requests with `gzip`. 
+ + /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); self } - /// Enable decompressing responses with `gzip`. + + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` #[must_use] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); self } + pub async fn get_feature( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/GetFeature", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/GetFeature"); + let mut req = request.into_request(); + req.extensions_mut() + 
.insert(GrpcMethod::new("server_pb.Server", "GetFeature")); + self.inner.unary(req, path, codec).await } + pub async fn report_action( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/ReportAction", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/ReportAction"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "ReportAction")); + self.inner.unary(req, path, codec).await } + pub async fn report_taxi_action( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/ReportTaxiAction", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/ReportTaxiAction"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "ReportTaxiAction")); + self.inner.unary(req, path, codec).await } + pub async fn get_taxi_amount( &mut 
self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/GetTaxiAmount", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/GetTaxiAmount"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "GetTaxiAmount")); + self.inner.unary(req, path, codec).await } + pub async fn start_training( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/StartTraining", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/StartTraining"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "StartTraining")); + self.inner.unary(req, path, codec).await } } } @@ -253,58 +286,93 @@ pub mod server_client { pub mod server_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - ///Generated 
trait containing gRPC methods that should be implemented for use with ServerServer. + /// Generated trait containing gRPC methods that should be implemented for use with ServerServer. #[async_trait] pub trait Server: Send + Sync + 'static { async fn get_feature( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn report_action( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn report_taxi_action( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn get_taxi_amount( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn start_training( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] pub struct ServerServer { inner: _Inner, - accept_compression_encodings: (), - send_compression_encodings: (), + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, } struct _Inner(Arc); impl ServerServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } + pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } + + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } } impl tonic::codegen::Service> for ServerServer where @@ -312,39 +380,41 @@ pub mod server_server { B: Body + Send + 'static, B::Error: Into + Send + 'static, { - type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; + type Response = http::Response; + fn poll_ready( &mut self, _cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { Poll::Ready(Ok(())) } + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/server_pb.Server/GetFeature" => { #[allow(non_camel_case_types)] struct GetFeatureSvc(pub Arc); - impl tonic::server::UnaryService - for GetFeatureSvc { + impl tonic::server::UnaryService for GetFeatureSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::GetFeatureResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).get_feature(request).await }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::get_feature(&inner, request).await }; Box::pin(fut) } 
} let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -354,6 +424,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -363,28 +437,24 @@ pub mod server_server { "/server_pb.Server/ReportAction" => { #[allow(non_camel_case_types)] struct ReportActionSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for ReportActionSvc { + impl tonic::server::UnaryService for ReportActionSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::ReportActionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { - (*inner).report_action(request).await - }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::report_action(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -394,6 +464,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -403,28 +477,27 @@ pub 
mod server_server { "/server_pb.Server/ReportTaxiAction" => { #[allow(non_camel_case_types)] struct ReportTaxiActionSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for ReportTaxiActionSvc { + impl tonic::server::UnaryService + for ReportTaxiActionSvc + { + type Future = BoxFuture, tonic::Status>; type Response = super::ReportTaxiActionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { - (*inner).report_taxi_action(request).await + ::report_taxi_action(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -434,6 +507,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -443,28 +520,25 @@ pub mod server_server { "/server_pb.Server/GetTaxiAmount" => { #[allow(non_camel_case_types)] struct GetTaxiAmountSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for GetTaxiAmountSvc { + impl tonic::server::UnaryService for GetTaxiAmountSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::GetTaxiAmountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_taxi_amount(request).await + ::get_taxi_amount(&inner, 
request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -474,6 +548,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -483,28 +561,24 @@ pub mod server_server { "/server_pb.Server/StartTraining" => { #[allow(non_camel_case_types)] struct StartTrainingSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for StartTrainingSvc { + impl tonic::server::UnaryService for StartTrainingSvc { + type Future = BoxFuture, tonic::Status>; type Response = super::StartTrainingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { - (*inner).start_training(request).await - }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::start_training(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -514,24 +588,24 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, 
req).await; Ok(res) }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), } } } @@ -542,12 +616,14 @@ pub mod server_server { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { @@ -555,7 +631,7 @@ pub mod server_server { write!(f, "{:?}", self.0) } } - impl tonic::transport::NamedService for ServerServer { + impl tonic::server::NamedService for ServerServer { const NAME: &'static str = "server_pb.Server"; } } diff --git a/integration_tests/feature-store/server/src/serving.rs b/integration_tests/feature-store/server/src/serving.rs index c00d8041492c0..d1137e534e7f9 100644 --- a/integration_tests/feature-store/server/src/serving.rs +++ b/integration_tests/feature-store/server/src/serving.rs @@ -53,8 +53,15 @@ impl FeatureStoreServer { } } - pub async fn get_taxi_amount(&self, do_location_id: i64,pu_location_id: i64) -> Result { - let request = GetAmountRequest { do_location_id ,pu_location_id}; + pub async fn get_taxi_amount( + &self, + do_location_id: i64, + pu_location_id: i64, + ) -> Result { + let request = GetAmountRequest { + do_location_id, + pu_location_id, + }; let mut model_client = ModelClient::connect("http://localhost:8080") .await .expect("Failed to connect to model server"); diff --git a/integration_tests/feature-store/simulator/Cargo.lock 
b/integration_tests/feature-store/simulator/Cargo.lock index c2be1809ce1bd..9d7b0fc4a1f82 100644 --- a/integration_tests/feature-store/simulator/Cargo.lock +++ b/integration_tests/feature-store/simulator/Cargo.lock @@ -3,35 +3,90 @@ version = 3 [[package]] -name = "ansi_term" -version = "0.12.1" +name = "addr2line" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "winapi", + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", ] [[package]] name = "anyhow" -version = "1.0.58" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", @@ -40,26 +95,15 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -68,13 +112,13 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.13" +version = "0.6.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "http", @@ -86,20 +130,19 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", + "rustversion", "serde", "sync_wrapper", - "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -107,13 +150,31 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", ] [[package]] name = "base64" -version = "0.13.0" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "bitflags" @@ -121,23 +182,32 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + 
[[package]] name = "bumpalo" -version = "3.10.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytes" -version = "1.1.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cfg-if" @@ -147,19 +217,37 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "2.34.0" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" dependencies = [ - "ansi_term", - "atty", - "bitflags", + "anstream", + "anstyle", + "clap_lex", "strsim", - "textwrap", - "unicode-width", - "vec_map", ] +[[package]] +name = "clap_lex" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "core-foundation" version = "0.9.3" @@ -172,15 +260,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -190,37 +278,44 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] [[package]] name = "either" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] [[package]] -name = "fastrand" -version = "1.7.0" +name = "errno" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "instant", + "libc", + "windows-sys", ] +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + [[package]] name = "fnv" version = "1.0.7" @@ -244,19 +339,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ - "matches", "percent-encoding", ] [[package]] name = "futures" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -269,9 +363,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -279,15 +373,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" 
-version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -296,15 +390,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", @@ -313,21 +407,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -343,31 +437,26 @@ 
dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "wasi", ] [[package]] -name = "getrandom" -version = "0.2.7" +name = "gimli" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", -] +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "h2" -version = "0.3.13" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -390,18 +479,15 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -419,29 +505,23 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -454,7 +534,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -488,60 +568,50 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", ] -[[package]] -name = "instant" -version = "0.1.12" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - [[package]] name = "ipnet" -version = "2.5.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itertools" -version = "0.10.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -554,60 +624,65 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] -name = "log" -version = "0.4.17" +name = "linux-raw-sys" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] -name = "matches" -version = "0.1.9" +name = "log" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] [[package]] name = "mio" -version = "0.8.4" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys", ] [[package]] name = 
"native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -623,27 +698,36 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" -version = "1.13.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -654,9 +738,9 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", @@ -671,11 +755,10 @@ checksum 
= "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ - "autocfg", "cc", "libc", "pkg-config", @@ -684,24 +767,24 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pin-project" -version = "1.0.11" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", @@ -710,9 +793,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -722,30 +805,30 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.10.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" dependencies = [ "bytes", "prost-derive", @@ -753,9 +836,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.10.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", "itertools", @@ -766,26 +849,13 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.20" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.7.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -793,18 +863,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -814,59 +874,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.3.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", -] - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", + "bitflags 1.3.2", ] [[package]] name = "reqwest" -version = "0.11.11" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64", "bytes", @@ -880,15 +913,16 @@ dependencies = [ "hyper-tls", "ipnet", "js-sys", - "lazy_static", "log", "mime", "native-tls", + "once_cell", "percent-encoding", "pin-project-lite", "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tower-service", @@ -899,29 +933,53 @@ dependencies = [ "winreg", ] +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "lazy_static", "windows-sys", ] [[package]] name = "security-framework" -version = "2.6.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -930,9 +988,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -940,18 +998,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.139" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0171ebb889e45aa68b44aee0859b3eede84c6f5f5c228e6f140c0b2a0a46cad6" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.139" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1d3230c1de7932af58ad8ffbe1d784bd55efd5a9d84ac24f69c72d83543dfb" +checksum = 
"1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", @@ -960,9 +1018,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -989,7 +1047,7 @@ dependencies = [ "csv", "futures", "prost", - "rand 0.7.3", + "rand", "reqwest", "serde", "serde_derive", @@ -1000,31 +1058,44 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +dependencies = [ + "libc", + "windows-sys", +] + [[package]] name = "strsim" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.98" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -1033,31 +1104,42 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "tempfile" -version = "3.3.0" +name = "system-configuration" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "cfg-if", - "fastrand", + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", "libc", - "redox_syscall", - "remove_dir_all", - "winapi", ] [[package]] -name = "textwrap" -version = "0.11.0" +name = "tempfile" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ - "unicode-width", + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys", ] [[package]] @@ -1071,27 +1153,25 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = 
"1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.20.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57aec3cfa4c296db7255446efb4928a6be304b431a806216105542a67b6ca82e" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", - "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", - "socket2", + "socket2 0.5.4", "tokio-macros", - "winapi", + "windows-sys", ] [[package]] @@ -1106,9 +1186,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", @@ -1117,9 +1197,9 @@ dependencies = [ [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -1127,9 +1207,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -1138,9 +1218,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = 
"1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -1152,17 +1232,15 @@ dependencies = [ [[package]] name = "tonic" -version = "0.7.2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb" +checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" dependencies = [ "async-stream", "async-trait", "axum", "base64", "bytes", - "futures-core", - "futures-util", "h2", "http", "http-body", @@ -1171,15 +1249,12 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "prost-derive", "tokio", "tokio-stream", - "tokio-util", "tower", "tower-layer", "tower-service", "tracing", - "tracing-futures", ] [[package]] @@ -1193,7 +1268,7 @@ dependencies = [ "indexmap", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util", @@ -1202,30 +1277,11 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -1235,12 +1291,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" dependencies = [ - "cfg-if", - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1248,9 +1302,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.22" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", @@ -1259,96 +1313,72 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.2" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - [[package]] name = "url" -version = "2.2.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", ] [[package]] -name = "vcpkg" -version = "0.2.15" +name = "utf8parse" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] -name = "vec_map" -version = "0.8.2" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] 
-[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1357,9 +1387,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1367,13 +1397,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -1382,9 +1412,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -1394,9 +1424,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1404,9 +1434,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", @@ -1417,15 +1447,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -1455,52 +1485,76 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.36.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ + "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", + "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys", ] diff --git a/integration_tests/feature-store/simulator/Cargo.toml b/integration_tests/feature-store/simulator/Cargo.toml index 1fd9609ba2d1a..03264cde563bb 100644 --- a/integration_tests/feature-store/simulator/Cargo.toml +++ b/integration_tests/feature-store/simulator/Cargo.toml @@ -9,17 +9,14 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { version = "1", features=["rt", "rt-multi-thread"]} -tonic = "0.7.1" +tokio = { version = "1", features=["macros","rt", "rt-multi-thread"]} +tonic = "0.10.2" reqwest = { version = "0.11"} serde_json = "1.0" serde_derive = "1.0" -rand = "0.7" -clap = "2.26.0" -prost = "0.10" +rand = "0.8" +clap = "4.4.6" +prost = "0.12" serde = { version = "1", features = ["derive"] } -futures = "0.3.0" -csv = "1.2.2" - -[lints] -workspace = true +futures = "0.3.28" +csv = "1.3.0" diff --git a/integration_tests/feature-store/simulator/rust-toolchain b/integration_tests/feature-store/simulator/rust-toolchain index 35bda38a1dcfb..292fe499e3b25 100644 --- a/integration_tests/feature-store/simulator/rust-toolchain +++ b/integration_tests/feature-store/simulator/rust-toolchain @@ -1 +1,2 @@ -nightly-2022-06-20 +[toolchain] +channel = "stable" diff --git a/integration_tests/feature-store/simulator/src/entities.rs b/integration_tests/feature-store/simulator/src/entities.rs index 114a1e5c1ee0f..0bdf4fe0235d3 100644 --- a/integration_tests/feature-store/simulator/src/entities.rs +++ b/integration_tests/feature-store/simulator/src/entities.rs @@ -38,7 +38,7 @@ impl User { &'a self, client: &'a mut ServerClient, ) -> Result { - let changenum: i64 = rand::thread_rng().gen_range(0, 90); + let changenum: i64 = rand::thread_rng().gen_range(0..90); let (changenum, 
event_type) = { if changenum > 0 && changenum < 30 { (changenum, "mfa+") @@ -60,7 +60,7 @@ impl User { Ok(ActionHistory { userid: self.userid.clone(), - changenum: changenum, + changenum, event_type: event_type.to_string(), timestamp, }) @@ -93,5 +93,5 @@ pub fn parse_user_metadata() -> Result, ()> { let users = read_users_json(Path::new(&*generator_path).join("users.json")).unwrap(); - return Ok(users); + Ok(users) } diff --git a/integration_tests/feature-store/simulator/src/entities_taxi.rs b/integration_tests/feature-store/simulator/src/entities_taxi.rs index 8ef9e1f358e4b..b5800f707d0e0 100644 --- a/integration_tests/feature-store/simulator/src/entities_taxi.rs +++ b/integration_tests/feature-store/simulator/src/entities_taxi.rs @@ -101,7 +101,7 @@ impl TaxiFeature { mta_tax: self.mta_tax, tip_amount: self.tip_amount, tolls_amount: self.tolls_amount, - ehail_fee: self.ehail_fee.unwrap_or_else(|| 0.0), + ehail_fee: self.ehail_fee.unwrap_or(0.0), improvement_surcharge: self.improvement_surcharge, total_amount: self.total_amount, payment_type: self.payment_type, diff --git a/integration_tests/feature-store/simulator/src/main.rs b/integration_tests/feature-store/simulator/src/main.rs index daa6c30af1c74..f062b63d9cc84 100644 --- a/integration_tests/feature-store/simulator/src/main.rs +++ b/integration_tests/feature-store/simulator/src/main.rs @@ -1,4 +1,4 @@ -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgMatches, Command}; mod entities; mod entities_taxi; @@ -9,22 +9,22 @@ mod simulation; async fn main() { let args = get_args(); simulation::main_loop( - args.value_of("types") + args.get_one::("types") .expect("failed to decode brokers") .to_string(), ) .await; } -fn get_args<'a>() -> ArgMatches<'a> { - App::new("simulator") +fn get_args() -> ArgMatches { + Command::new("simulator") .about("The simulator") .arg( - Arg::with_name("types") - .short("t") + Arg::new("types") + .short('t') .long("types") .help("mfa or taxi") - .takes_value(true) + .num_args(1) 
.default_value("taxi"), ) .get_matches() diff --git a/integration_tests/feature-store/simulator/src/server_pb.rs b/integration_tests/feature-store/simulator/src/server_pb.rs index 1f2912d868412..697c2870b7df8 100644 --- a/integration_tests/feature-store/simulator/src/server_pb.rs +++ b/integration_tests/feature-store/simulator/src/server_pb.rs @@ -1,96 +1,104 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportActionRequest { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub userid: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub eventtype: ::prost::alloc::string::String, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub changenum: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportActionResponse { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub timestamp: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetFeatureRequest { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub userid: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetFeatureResponse { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub count: u64, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub sum: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReportTaxiActionRequest { - #[prost(int32, tag="1")] + #[prost(int32, tag = "1")] pub vendor_id: i32, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub lpep_pickup_datetime: ::prost::alloc::string::String, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub lpep_dropoff_datetime: ::prost::alloc::string::String, - #[prost(bool, tag="4")] + #[prost(bool, tag = "4")] pub 
store_and_fwd_flag: bool, - #[prost(double, tag="5")] + #[prost(double, tag = "5")] pub ratecode_id: f64, - #[prost(int64, tag="6")] + #[prost(int64, tag = "6")] pub pu_location_id: i64, - #[prost(int64, tag="7")] + #[prost(int64, tag = "7")] pub do_location_id: i64, - #[prost(double, tag="8")] + #[prost(double, tag = "8")] pub passenger_count: f64, - #[prost(double, tag="9")] + #[prost(double, tag = "9")] pub trip_distance: f64, - #[prost(double, tag="10")] + #[prost(double, tag = "10")] pub fare_amount: f64, - #[prost(double, tag="11")] + #[prost(double, tag = "11")] pub extra: f64, - #[prost(double, tag="12")] + #[prost(double, tag = "12")] pub mta_tax: f64, - #[prost(double, tag="13")] + #[prost(double, tag = "13")] pub tip_amount: f64, - #[prost(double, tag="14")] + #[prost(double, tag = "14")] pub tolls_amount: f64, - #[prost(double, tag="15")] + #[prost(double, tag = "15")] pub ehail_fee: f64, - #[prost(double, tag="16")] + #[prost(double, tag = "16")] pub improvement_surcharge: f64, - #[prost(double, tag="17")] + #[prost(double, tag = "17")] pub total_amount: f64, - #[prost(double, tag="18")] + #[prost(double, tag = "18")] pub payment_type: f64, - #[prost(double, tag="19")] + #[prost(double, tag = "19")] pub trip_type: f64, - #[prost(double, tag="20")] + #[prost(double, tag = "20")] pub congestion_surcharge: f64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct ReportTaxiActionResponse { -} +pub struct ReportTaxiActionResponse {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetTaxiAmountRequest { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub do_location_id: i64, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub pu_location_id: i64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetTaxiAmountResponse { - #[prost(double, tag="1")] + #[prost(double, tag = "1")] 
pub fare_amount: f64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartTrainingRequest { -} +pub struct StartTrainingRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartTrainingResponse { -} +pub struct StartTrainingResponse {} /// Generated client implementations. pub mod server_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; use tonic::codegen::*; #[derive(Debug, Clone)] pub struct ServerClient { @@ -100,7 +108,7 @@ pub mod server_client { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where - D: std::convert::TryInto, + D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; @@ -118,6 +126,10 @@ pub mod server_client { let inner = tonic::client::Grpc::new(inner); Self { inner } } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } pub fn with_interceptor( inner: T, interceptor: F, @@ -131,121 +143,131 @@ pub mod server_client { >::ResponseBody, >, >, - , - >>::Error: Into + Send + Sync, + >>::Error: + Into + Send + Sync, { ServerClient::new(InterceptedService::new(inner, interceptor)) } - /// Compress requests with `gzip`. + /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); self } - /// Enable decompressing responses with `gzip`. + /// Enable decompressing responses. 
#[must_use] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); self } pub async fn get_feature( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/GetFeature", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/GetFeature"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "GetFeature")); + self.inner.unary(req, path, codec).await } pub async fn report_action( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + 
tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/ReportAction", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/ReportAction"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "ReportAction")); + self.inner.unary(req, path, codec).await } pub async fn report_taxi_action( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/ReportTaxiAction", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/ReportTaxiAction"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "ReportTaxiAction")); + self.inner.unary(req, path, codec).await } pub async fn get_taxi_amount( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = 
http::uri::PathAndQuery::from_static( - "/server_pb.Server/GetTaxiAmount", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/GetTaxiAmount"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "GetTaxiAmount")); + self.inner.unary(req, path, codec).await } pub async fn start_training( &mut self, request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/server_pb.Server/StartTraining", - ); - self.inner.unary(request.into_request(), path, codec).await + let path = http::uri::PathAndQuery::from_static("/server_pb.Server/StartTraining"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("server_pb.Server", "StartTraining")); + self.inner.unary(req, path, codec).await } } } @@ -253,35 +275,37 @@ pub mod server_client { pub mod server_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - ///Generated trait containing gRPC methods that should be implemented for use with ServerServer. + /// Generated trait containing gRPC methods that should be implemented for use with ServerServer. 
#[async_trait] pub trait Server: Send + Sync + 'static { async fn get_feature( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn report_action( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn report_taxi_action( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn get_taxi_amount( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; async fn start_training( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] pub struct ServerServer { inner: _Inner, - accept_compression_encodings: (), - send_compression_encodings: (), + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, } struct _Inner(Arc); impl ServerServer { @@ -294,17 +318,44 @@ pub mod server_server { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } } impl tonic::codegen::Service> for ServerServer where @@ -318,7 +369,7 @@ pub mod server_server { fn poll_ready( &mut self, _cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -327,24 +378,23 @@ pub mod server_server { "/server_pb.Server/GetFeature" => { #[allow(non_camel_case_types)] struct GetFeatureSvc(pub Arc); - impl tonic::server::UnaryService - for GetFeatureSvc { + impl tonic::server::UnaryService for GetFeatureSvc { type Response = super::GetFeatureResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).get_feature(request).await }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::get_feature(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -354,6 +404,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, 
send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -363,28 +417,23 @@ pub mod server_server { "/server_pb.Server/ReportAction" => { #[allow(non_camel_case_types)] struct ReportActionSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for ReportActionSvc { + impl tonic::server::UnaryService for ReportActionSvc { type Response = super::ReportActionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { - (*inner).report_action(request).await - }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::report_action(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -394,6 +443,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -403,28 +456,26 @@ pub mod server_server { "/server_pb.Server/ReportTaxiAction" => { #[allow(non_camel_case_types)] struct ReportTaxiActionSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for ReportTaxiActionSvc { + impl tonic::server::UnaryService + for ReportTaxiActionSvc + { type Response = super::ReportTaxiActionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut 
self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { - (*inner).report_taxi_action(request).await + ::report_taxi_action(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -434,6 +485,10 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -443,28 +498,24 @@ pub mod server_server { "/server_pb.Server/GetTaxiAmount" => { #[allow(non_camel_case_types)] struct GetTaxiAmountSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for GetTaxiAmountSvc { + impl tonic::server::UnaryService for GetTaxiAmountSvc { type Response = super::GetTaxiAmountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_taxi_amount(request).await + ::get_taxi_amount(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -474,6 +525,10 @@ pub mod server_server { .apply_compression_config( 
accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) @@ -483,28 +538,23 @@ pub mod server_server { "/server_pb.Server/StartTraining" => { #[allow(non_camel_case_types)] struct StartTrainingSvc(pub Arc); - impl< - T: Server, - > tonic::server::UnaryService - for StartTrainingSvc { + impl tonic::server::UnaryService for StartTrainingSvc { type Response = super::StartTrainingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { - (*inner).start_training(request).await - }; + let inner = Arc::clone(&self.0); + let fut = + async move { ::start_training(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -514,24 +564,24 @@ pub mod server_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), } } } @@ -542,12 
+592,14 @@ pub mod server_server { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { @@ -555,7 +607,7 @@ pub mod server_server { write!(f, "{:?}", self.0) } } - impl tonic::transport::NamedService for ServerServer { + impl tonic::server::NamedService for ServerServer { const NAME: &'static str = "server_pb.Server"; } } diff --git a/integration_tests/feature-store/simulator/src/simulation.rs b/integration_tests/feature-store/simulator/src/simulation.rs index 96f3c31921a5e..8a693e9227809 100644 --- a/integration_tests/feature-store/simulator/src/simulation.rs +++ b/integration_tests/feature-store/simulator/src/simulation.rs @@ -4,7 +4,6 @@ use std::thread::sleep; use std::time::Duration; use futures::future::join_all; -use rand; use rand::Rng; use tokio::sync::Mutex; use tonic::transport::Channel; @@ -14,8 +13,7 @@ use crate::server_pb::StartTrainingRequest; use crate::{entities, entities_taxi}; fn get_delay_mills(delay_val: f64) -> u64 { - let turbulence = - rand::thread_rng().gen_range((delay_val * 0.6) as f64, (delay_val * 1.1) as f64) as f64; + let turbulence = rand::thread_rng().gen_range((delay_val * 0.6)..(delay_val * 1.1)); (turbulence * 10000.0) as u64 } @@ -33,7 +31,7 @@ pub async fn main_loop(simulator_type: String) { } } -async fn mock_taxi(client: Arc>>) -> () { +async fn mock_taxi(client: Arc>>) { let (offline_features, online_features) = entities_taxi::parse_taxi_metadata(); println!("Write training data len is {:?}", offline_features.len()); let mut threads = vec![]; @@ -87,7 +85,7 @@ async fn mock_taxi(client: Arc>>) -> () { } #[allow(dead_code)] -async fn mock_user_mfa(client: Arc>>) -> () { +async fn 
mock_user_mfa(client: Arc>>) { let users = entities::parse_user_metadata().unwrap(); let mut threads = vec![]; for user in users { diff --git a/integration_tests/iceberg-cdc/README.md b/integration_tests/iceberg-cdc/README.md new file mode 100644 index 0000000000000..56f40172c3dfa --- /dev/null +++ b/integration_tests/iceberg-cdc/README.md @@ -0,0 +1,5 @@ +# Iceberg CDC Integration Tests +`mysql -> rw -> iceberg` + +# How to run +./run_test.sh \ No newline at end of file diff --git a/integration_tests/iceberg-cdc/docker-compose.yaml b/integration_tests/iceberg-cdc/docker-compose.yaml new file mode 100644 index 0000000000000..8e9ad1062ef38 --- /dev/null +++ b/integration_tests/iceberg-cdc/docker-compose.yaml @@ -0,0 +1,142 @@ +version: '3.8' + +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + mc: + depends_on: + - minio-0 + image: minio/mc + environment: + - AWS_ACCESS_KEY_ID=hummockadmin + - AWS_SECRET_ACCESS_KEY=hummockadmin + - AWS_REGION=us-east-1 + entrypoint: > + /bin/sh -c " + until (/usr/bin/mc config host add minio http://minio-0:9301 hummockadmin hummockadmin) do echo '...waiting...' 
&& sleep 1; done; + /usr/bin/mc rm -r --force minio/icebergdata; + /usr/bin/mc mb minio/icebergdata; + /usr/bin/mc anonymous set public minio/icebergdata; + tail -f /dev/null + " + + mysql: + image: mysql:8.0 + expose: + - 3306 + ports: + - "3306:3306" + environment: + - MYSQL_ROOT_PASSWORD=123456 + - MYSQL_USER=mysqluser + - MYSQL_PASSWORD=mysqlpw + - MYSQL_DATABASE=mydb + healthcheck: + test: [ "CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -u root -p123456" ] + interval: 5s + timeout: 5s + retries: 5 + container_name: mysql + prepare_mysql: + image: mysql:8.0 + depends_on: + - mysql + command: + - /bin/sh + - -c + - "mysql -p123456 -h mysql mydb < mysql_prepare.sql" + volumes: + - "./mysql_prepare.sql:/mysql_prepare.sql" + container_name: prepare_mysql + restart: on-failure + + rest: + image: tabulario/iceberg-rest:0.6.0 + environment: + - AWS_ACCESS_KEY_ID=hummockadmin + - AWS_SECRET_ACCESS_KEY=hummockadmin + - AWS_REGION=us-east-1 + - CATALOG_CATOLOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog + - CATALOG_URI=jdbc:sqlite:file:/tmp/iceberg_rest_mode=memory + - CATALOG_WAREHOUSE=s3://icebergdata/demo + - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO + - CATALOG_S3_ENDPOINT=http://minio-0:9301 + depends_on: + - minio-0 + # let the rest access minio through: hummock001.minio-0 + links: + - minio-0:icebergdata.minio-0 + expose: + - 8181 + ports: + - "8181:8181" + + spark: + depends_on: + - minio-0 + - rest + image: ghcr.io/icelake-io/icelake-spark:latest + environment: + - AWS_ACCESS_KEY_ID=hummockadmin + - AWS_SECRET_ACCESS_KEY=hummockadmin + - AWS_REGION=us-east-1 + - SPARK_HOME=/opt/spark + - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/spark/bin:/opt/spark/sbin + user: root + links: + - minio-0:icebergdata.minio-0 + expose: + - 15002 + ports: + - "15002:15002" + healthcheck: + test: netstat -ltn | grep -c 15002 + interval: 1s + retries: 1200 + volumes: + - ./spark:/spark + command: [ "bash", "/spark/spark-connect-server.sh" ] + 
+volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + spark: + external: false diff --git a/integration_tests/iceberg-cdc/mysql_prepare.sql b/integration_tests/iceberg-cdc/mysql_prepare.sql new file mode 100644 index 0000000000000..3e5a236a41205 --- /dev/null +++ b/integration_tests/iceberg-cdc/mysql_prepare.sql @@ -0,0 +1,15 @@ +-- mysql -p123456 -uroot -h 127.0.0.1 mydb < mysql_prepare.sql +-- +-- Mysql +USE mydb; + +CREATE TABLE user_behaviors ( + user_id VARCHAR(60), + target_id VARCHAR(60), + target_type VARCHAR(60), + event_timestamp VARCHAR(100), + behavior_type VARCHAR(60), + parent_target_type VARCHAR(60), + parent_target_id VARCHAR(60), + PRIMARY KEY(user_id, target_id, event_timestamp) +); diff --git a/integration_tests/iceberg-cdc/python/check.py b/integration_tests/iceberg-cdc/python/check.py new file mode 100644 index 0000000000000..699fa4df29c30 --- /dev/null +++ b/integration_tests/iceberg-cdc/python/check.py @@ -0,0 +1,25 @@ +from pyspark.sql import SparkSession +import configparser +import psycopg2 + +def check_spark_table(args): + expect_row_count = 0 + rw_config = args['risingwave'] + with psycopg2.connect(database=rw_config['db'], user=rw_config['user'], host=rw_config['host'], + port=rw_config['port']) as conn: + with conn.cursor() as cursor: + cursor.execute("SELECT COUNT(*) FROM user_behaviors") + expect_row_count = cursor.fetchone()[0] + print(f"expect_row_count is {expect_row_count}") + spark_config = args['spark'] + spark = SparkSession.builder.remote(spark_config['url']).getOrCreate() + actual_row_count = spark.sql("SELECT COUNT(*) FROM s1.t1").collect()[0][0] + print(f"actual_row_count is {actual_row_count}") + assert actual_row_count==expect_row_count + + +if __name__ == "__main__": + config = configparser.ConfigParser() + config.read("config.ini") + print({section: dict(config[section]) for section in config.sections()}) 
+ check_spark_table(config) diff --git a/integration_tests/iceberg-cdc/python/config.ini b/integration_tests/iceberg-cdc/python/config.ini new file mode 100644 index 0000000000000..bd95eddc5b80e --- /dev/null +++ b/integration_tests/iceberg-cdc/python/config.ini @@ -0,0 +1,8 @@ +[spark] +url=sc://localhost:15002 + +[risingwave] +db=dev +user=root +host=127.0.0.1 +port=4566 diff --git a/integration_tests/iceberg-cdc/python/init.py b/integration_tests/iceberg-cdc/python/init.py new file mode 100644 index 0000000000000..289fa2f161889 --- /dev/null +++ b/integration_tests/iceberg-cdc/python/init.py @@ -0,0 +1,103 @@ +from pyspark.sql import SparkSession +import configparser +import psycopg2 + + +def init_spark_table(args): + spark_config = args['spark'] + spark = SparkSession.builder.remote(spark_config['url']).getOrCreate() + + init_table_sqls = [ + "CREATE SCHEMA IF NOT EXISTS s1", + "DROP TABLE IF EXISTS s1.t1", + """ + CREATE TABLE s1.t1 + ( + user_id string, + target_id string, + target_type string, + event_timestamp string, + behavior_type string, + parent_target_type string, + parent_target_id string + ) USING iceberg + TBLPROPERTIES ('format-version'='2'); + """, + ] + + for sql in init_table_sqls: + print(f"Executing sql: {sql}") + spark.sql(sql) + + +def init_risingwave_mv(args): + rw_config = args['risingwave'] + sqls = [ + "set streaming_parallelism = 4", + """ + CREATE TABLE user_behaviors ( + user_id VARCHAR, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp VARCHAR, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id, target_id, event_timestamp) + ) with ( + connector = 'mysql-cdc', + hostname = 'mysql', + port = '3306', + username = 'root', + password = '123456', + database.name = 'mydb', + table.name = 'user_behaviors', + server.id = '1' + ); + """, + # f""" + # CREATE SINK s1 + # AS SELECT * FROM user_behaviors + # WITH ( + # connector='iceberg', + # type='upsert', + # primary_key = 
'user_id, target_id, event_timestamp', + # catalog.type = 'storage', + # s3.endpoint = 'http://minio-0:9301', + # s3.access.key = 'hummockadmin', + # s3.secret.key = 'hummockadmin', + # database.name='demo', + # table.name='s1.t1',warehouse.path = 's3://hummock001/icebergdata/demo',s3.region = 'us-east-1' + # ); + # """ + f""" + CREATE SINK s1 + AS SELECT * FROM user_behaviors + WITH ( + connector='iceberg', + type='upsert', + primary_key = 'user_id, target_id, event_timestamp', + catalog.type = 'rest', + catalog.uri = 'http://rest:8181', + s3.endpoint = 'http://minio-0:9301', + s3.access.key = 'hummockadmin', + s3.secret.key = 'hummockadmin', + database.name='demo', + table.name='s1.t1',warehouse.path = 's3://icebergdata/demo/s1/t1',s3.region = 'us-east-1' + ); + """ + ] + with psycopg2.connect(database=rw_config['db'], user=rw_config['user'], host=rw_config['host'], + port=rw_config['port']) as conn: + with conn.cursor() as cursor: + for sql in sqls: + print(f"Executing sql {sql}") + cursor.execute(sql) + + +if __name__ == "__main__": + config = configparser.ConfigParser() + config.read("config.ini") + print({section: dict(config[section]) for section in config.sections()}) + init_spark_table(config) + init_risingwave_mv(config) diff --git a/integration_tests/iceberg-cdc/python/pyproject.toml b/integration_tests/iceberg-cdc/python/pyproject.toml new file mode 100644 index 0000000000000..4c7bce1165796 --- /dev/null +++ b/integration_tests/iceberg-cdc/python/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "icelake-integration-tests" +version = "0.0.9" +description = "" +authors = ["Renjie Liu "] +readme = "README.md" +packages = [{include = "icelake_integration_tests"}] + +[tool.poetry.dependencies] +python = "^3.11" +pyspark = { version = "3.4.1", extras = ["sql", "connect"] } +psycopg2-binary = "^2.9" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/integration_tests/iceberg-cdc/run_test.sh 
b/integration_tests/iceberg-cdc/run_test.sh new file mode 100755 index 0000000000000..2d8b691bc7284 --- /dev/null +++ b/integration_tests/iceberg-cdc/run_test.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Start test environment. +docker-compose up -d --wait + +# To avoid exiting by unhealth, set it after start environment. +set -ex + +# Generate data +docker build -t iceberg-cdc-datagen ../datagen +timeout 20 docker run --network=iceberg-cdc_default iceberg-cdc-datagen /datagen --mode clickstream --qps 1 mysql --user mysqluser --password mysqlpw --host mysql --port 3306 --db mydb & + +cd python +poetry update --quiet +# Init source, mv, and sink. +poetry run python init.py +# Wait for sink to be finished. +sleep 40; +poetry run python check.py diff --git a/integration_tests/iceberg-cdc/spark/.gitignore b/integration_tests/iceberg-cdc/spark/.gitignore new file mode 100644 index 0000000000000..51dcf07222856 --- /dev/null +++ b/integration_tests/iceberg-cdc/spark/.gitignore @@ -0,0 +1,3 @@ +derby.log +metastore_db +.ivy \ No newline at end of file diff --git a/integration_tests/iceberg-cdc/spark/spark-connect-server.sh b/integration_tests/iceberg-cdc/spark/spark-connect-server.sh new file mode 100755 index 0000000000000..7c1cd64f1a2f2 --- /dev/null +++ b/integration_tests/iceberg-cdc/spark/spark-connect-server.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -ex + +JARS=$(find /opt/spark/deps -type f -name "*.jar" | tr '\n' ':') + +/opt/spark/sbin/start-connect-server.sh \ + --master local[3] \ + --driver-class-path $JARS \ + --conf spark.driver.bindAddress=0.0.0.0 \ + --conf spark.sql.catalog.demo=org.apache.iceberg.spark.SparkCatalog \ + --conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions \ + --conf spark.sql.catalog.demo.catalog-impl=org.apache.iceberg.rest.RESTCatalog \ + --conf spark.sql.catalog.demo.uri=http://rest:8181 \ + --conf spark.sql.catalog.demo.s3.endpoint=http://minio-0:9301 \ + --conf 
spark.sql.catalog.demo.s3.path.style.access=true \ + --conf spark.sql.catalog.demo.s3.access.key=hummockadmin \ + --conf spark.sql.catalog.demo.s3.secret.key=hummockadmin \ + --conf spark.sql.defaultCatalog=demo + +tail -f /opt/spark/logs/spark*.out diff --git a/integration_tests/kafka-cdc-sink/risingwave.sql b/integration_tests/kafka-cdc-sink/risingwave.sql index 588731a33940d..fabddc9229e2d 100644 --- a/integration_tests/kafka-cdc-sink/risingwave.sql +++ b/integration_tests/kafka-cdc-sink/risingwave.sql @@ -22,6 +22,5 @@ connector = 'kafka', properties.bootstrap.server='message_queue:29092', topic = 'counts', type = 'debezium', -use_transaction = 'false', primary_key = 'id' ); \ No newline at end of file diff --git a/integration_tests/redis-sink/README.md b/integration_tests/redis-sink/README.md new file mode 100644 index 0000000000000..f2e5e64aec795 --- /dev/null +++ b/integration_tests/redis-sink/README.md @@ -0,0 +1,30 @@ +# Demo: Sinking to Redis + +In this demo, we want to showcase how RisingWave is able to sink data to Redis. + +1. Launch the cluster: + +```sh +docker compose up -d +``` + +The cluster contains a RisingWave cluster and its necessary dependencies, a datagen that generates the data, a Redis for sink. + + +2. Execute the SQL queries in sequence: + +- create_source.sql +- create_mv.sql +- create_sink.sql + +3. 
Execute a simple query: + +```sh +docker compose exec redis redis-ctl keys * + +``` +We also can use 'get' to query value + +```sql +select user_id, count(*) from default.demo_test group by user_id +``` diff --git a/integration_tests/redis-sink/create_mv.sql b/integration_tests/redis-sink/create_mv.sql new file mode 100644 index 0000000000000..0a803f8a2762d --- /dev/null +++ b/integration_tests/redis-sink/create_mv.sql @@ -0,0 +1,7 @@ +CREATE MATERIALIZED VIEW bhv_mv AS +SELECT + user_id, + target_id, + event_timestamp +FROM + user_behaviors; \ No newline at end of file diff --git a/integration_tests/redis-sink/create_sink.sql b/integration_tests/redis-sink/create_sink.sql new file mode 100644 index 0000000000000..2ba9ba67feb39 --- /dev/null +++ b/integration_tests/redis-sink/create_sink.sql @@ -0,0 +1,15 @@ +CREATE SINK bhv_redis_sink_1 +FROM + bhv_mv WITH ( + primary_key = 'user_id', + connector = 'redis', + redis.url= 'redis://127.0.0.1:6379/', +)FORMAT PLAIN ENCODE JSON(force_append_only='true'); + +CREATE SINK bhv_redis_sink_2 +FROM + bhv_mv WITH ( + primary_key = 'user_id', + connector = 'redis', + redis.url= 'redis://127.0.0.1:6379/', +)FORMAT PLAIN ENCODE TEMPLATE(force_append_only='true', key_format = 'UserID:{user_id}', value_format = 'TargetID:{target_id},EventTimestamp{event_timestamp}'); \ No newline at end of file diff --git a/integration_tests/redis-sink/create_source.sql b/integration_tests/redis-sink/create_source.sql new file mode 100644 index 0000000000000..f64e2ccbec82a --- /dev/null +++ b/integration_tests/redis-sink/create_source.sql @@ -0,0 +1,16 @@ +CREATE table user_behaviors ( + user_id INT, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMP, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id) +) WITH ( + connector = 'datagen', + fields.user_id.kind = 'sequence', + fields.user_id.start = 1, + fields.user_id.end = 100, + datagen.rows.per.second = '100' +) FORMAT 
PLAIN ENCODE JSON; \ No newline at end of file diff --git a/integration_tests/redis-sink/docker-compose.yml b/integration_tests/redis-sink/docker-compose.yml new file mode 100644 index 0000000000000..a850f9b35c431 --- /dev/null +++ b/integration_tests/redis-sink/docker-compose.yml @@ -0,0 +1,71 @@ +--- +version: "3" +services: + redis: + image: 'redis:latest' + expose: + - 6379 + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 30s + retries: 50 + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode clickstream --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/scripts/check_data.py b/integration_tests/scripts/check_data.py index 02d1c44c55229..52cc79dc0ab0a 100644 --- a/integration_tests/scripts/check_data.py +++ b/integration_tests/scripts/check_data.py @@ -23,7 +23,7 @@ def check_mv(rel: str): rows = 
run_psql("SELECT COUNT(*) FROM {}_mv".format(rel)) rows = int(rows.decode('utf8').strip()) print("{} rows in {}".format(rows, rel)) - assert rows >= 1 + return rows >= 1 # Check the number of rows of cdc table @@ -67,8 +67,12 @@ def run_psql(sql): for rel in relations: create_mv(rel) time.sleep(20) + failed_cases = [] for rel in relations: - check_mv(rel) + if not check_mv(rel): + failed_cases.append(rel) + if len(failed_cases) != 0: + raise Exception("Data check failed for case {}".format(failed_cases)) cdc_check_file = os.path.join(demo_dir, 'cdc_check') if not os.path.exists(cdc_check_file): diff --git a/integration_tests/scripts/run_demos.py b/integration_tests/scripts/run_demos.py index 28623f7ddc4a7..da2519e18db44 100644 --- a/integration_tests/scripts/run_demos.py +++ b/integration_tests/scripts/run_demos.py @@ -42,6 +42,13 @@ def run_demo(demo: str, format: str, wait_time = 40): run_sql_file(sql_file, demo_dir) sleep(10) +def iceberg_cdc_demo(): + demo = "iceberg-cdc" + file_dir = dirname(abspath(__file__)) + project_dir = dirname(file_dir) + demo_dir = os.path.join(project_dir, demo) + print("Running demo: iceberg-cdc") + subprocess.run(["bash","./run_test.sh"], cwd=demo_dir, check=True) def run_iceberg_demo(): demo = "iceberg-sink" @@ -149,5 +156,7 @@ def run_clickhouse_demo(): run_iceberg_demo() elif args.case == "clickhouse-sink": run_clickhouse_demo() +elif args.case == "iceberg-cdc": + iceberg_cdc_demo() else: run_demo(args.case, args.format) diff --git a/integration_tests/twitter-pulsar/pb/create_mv.sql b/integration_tests/twitter-pulsar/pb/create_mv.sql index c08722bacdbb3..06d2eb14e4074 100644 --- a/integration_tests/twitter-pulsar/pb/create_mv.sql +++ b/integration_tests/twitter-pulsar/pb/create_mv.sql @@ -4,7 +4,7 @@ CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( SELECT unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag, - (data).created_at :: timestamp AS created_at + (data).created_at :: timestamptz AS created_at FROM twitter 
) diff --git a/integration_tests/twitter/pb/create_mv.sql b/integration_tests/twitter/pb/create_mv.sql index c08722bacdbb3..06d2eb14e4074 100644 --- a/integration_tests/twitter/pb/create_mv.sql +++ b/integration_tests/twitter/pb/create_mv.sql @@ -4,7 +4,7 @@ CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( SELECT unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag, - (data).created_at :: timestamp AS created_at + (data).created_at :: timestamptz AS created_at FROM twitter ) diff --git a/java/com_risingwave_java_binding_Binding.h b/java/com_risingwave_java_binding_Binding.h index c2c027ed22b58..801aa046502d3 100644 --- a/java/com_risingwave_java_binding_Binding.h +++ b/java/com_risingwave_java_binding_Binding.h @@ -223,6 +223,46 @@ JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_streamChunkItera JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_streamChunkIteratorFromPretty (JNIEnv *, jclass, jstring); +/* + * Class: com_risingwave_java_binding_Binding + * Method: sendCdcSourceMsgToChannel + * Signature: (J[B)Z + */ +JNIEXPORT jboolean JNICALL Java_com_risingwave_java_binding_Binding_sendCdcSourceMsgToChannel + (JNIEnv *, jclass, jlong, jbyteArray); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: recvSinkWriterRequestFromChannel + * Signature: (J)[B + */ +JNIEXPORT jbyteArray JNICALL Java_com_risingwave_java_binding_Binding_recvSinkWriterRequestFromChannel + (JNIEnv *, jclass, jlong); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: sendSinkWriterResponseToChannel + * Signature: (J[B)Z + */ +JNIEXPORT jboolean JNICALL Java_com_risingwave_java_binding_Binding_sendSinkWriterResponseToChannel + (JNIEnv *, jclass, jlong, jbyteArray); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: recvSinkCoordinatorRequestFromChannel + * Signature: (J)[B + */ +JNIEXPORT jbyteArray JNICALL Java_com_risingwave_java_binding_Binding_recvSinkCoordinatorRequestFromChannel + (JNIEnv *, jclass, 
jlong); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: sendSinkCoordinatorResponseToChannel + * Signature: (J[B)Z + */ +JNIEXPORT jboolean JNICALL Java_com_risingwave_java_binding_Binding_sendSinkCoordinatorResponseToChannel + (JNIEnv *, jclass, jlong, jbyteArray); + #ifdef __cplusplus } #endif diff --git a/java/common-utils/pom.xml b/java/common-utils/pom.xml index ed60690d58aec..c74e4082c42c4 100644 --- a/java/common-utils/pom.xml +++ b/java/common-utils/pom.xml @@ -1,11 +1,11 @@ + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT 4.0.0 @@ -15,11 +15,12 @@ 11 11 UTF-8 + true - com.risingwave.java + com.risingwave proto @@ -29,4 +30,4 @@ - \ No newline at end of file + diff --git a/java/connector-node/assembly/pom.xml b/java/connector-node/assembly/pom.xml index 6812bac5b63e6..6a68567718b25 100644 --- a/java/connector-node/assembly/pom.xml +++ b/java/connector-node/assembly/pom.xml @@ -1,11 +1,12 @@ - + 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml @@ -22,31 +23,31 @@ - com.risingwave.java + com.risingwave risingwave-connector-service - com.risingwave.java + com.risingwave risingwave-source-cdc - com.risingwave.java + com.risingwave risingwave-sink-es-7 - com.risingwave.java + com.risingwave risingwave-sink-cassandra - com.risingwave.java + com.risingwave risingwave-sink-jdbc - com.risingwave.java + com.risingwave risingwave-sink-iceberg - com.risingwave.java + com.risingwave risingwave-sink-deltalake @@ -56,7 +57,7 @@ - com.risingwave.java + com.risingwave s3-common @@ -107,4 +108,4 @@ - + \ No newline at end of file diff --git a/java/connector-node/connector-api/pom.xml b/java/connector-node/connector-api/pom.xml index fee5e3da60825..39fe113d448f5 100644 
--- a/java/connector-node/connector-api/pom.xml +++ b/java/connector-node/connector-api/pom.xml @@ -1,27 +1,29 @@ - 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml connector-api - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT connector-api 11 11 + true - com.risingwave.java + com.risingwave proto diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java index e443a7d3e286e..9140558c41cd6 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java @@ -39,7 +39,4 @@ public Data.Op getOp() { public int size() { return values.length; } - - @Override - public void close() throws Exception {} } diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java index 0ae0aa3facf7e..dcddfc07479b6 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java @@ -16,7 +16,7 @@ import com.risingwave.proto.Data; -public interface SinkRow extends AutoCloseable { +public interface SinkRow { Object get(int index); Data.Op getOp(); diff --git a/java/connector-node/python-client/integration_tests.py b/java/connector-node/python-client/integration_tests.py index 962f2a658018a..64fa949f48ce2 100644 --- a/java/connector-node/python-client/integration_tests.py +++ b/java/connector-node/python-client/integration_tests.py @@ -272,7 +272,7 @@ def test_jdbc_sink(input_file, param): def test_elasticsearch_sink(param): prop = { - 
"connector": "elasticsearch-7", + "connector": "elasticsearch", "url": "http://127.0.0.1:9200", "index": "test", } diff --git a/java/connector-node/risingwave-connector-service/pom.xml b/java/connector-node/risingwave-connector-service/pom.xml index 4e2dbe1d6ec96..e99a689d9c688 100644 --- a/java/connector-node/risingwave-connector-service/pom.xml +++ b/java/connector-node/risingwave-connector-service/pom.xml @@ -3,9 +3,9 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 @@ -18,19 +18,20 @@ 11 UTF-8 11 + true - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave java-binding - com.risingwave.java + com.risingwave connector-api @@ -73,34 +74,35 @@ test - + - com.risingwave.java + com.risingwave risingwave-source-cdc provided - com.risingwave.java + com.risingwave risingwave-sink-jdbc provided - com.risingwave.java + com.risingwave risingwave-sink-iceberg provided - com.risingwave.java + com.risingwave risingwave-sink-deltalake provided - com.risingwave.java + com.risingwave risingwave-sink-es-7 provided - com.risingwave.java + com.risingwave risingwave-sink-cassandra provided diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java index 5ea2db204a06c..0959b389e55ca 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java @@ -60,28 +60,25 @@ public FileSink(FileSinkConfig config, TableSchema tableSchema) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - try 
(SinkRow row = rows.next()) { - switch (row.getOp()) { - case INSERT: - String buf = - new Gson() - .toJson( - IntStream.range(0, row.size()) - .mapToObj(row::get) - .toArray()); - try { - sinkWriter.write(buf + System.lineSeparator()); - } catch (IOException e) { - throw INTERNAL.withCause(e).asRuntimeException(); - } - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); - } - } catch (Exception e) { - throw new RuntimeException(e); + SinkRow row = rows.next(); + switch (row.getOp()) { + case INSERT: + String buf = + new Gson() + .toJson( + IntStream.range(0, row.size()) + .mapToObj(row::get) + .toArray()); + try { + sinkWriter.write(buf + System.lineSeparator()); + } catch (IOException e) { + throw INTERNAL.withCause(e).asRuntimeException(); + } + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); } } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorHandler.java new file mode 100644 index 0000000000000..92ecb78ee31ea --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorHandler.java @@ -0,0 +1,50 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import com.risingwave.java.binding.Binding; +import com.risingwave.proto.ConnectorServiceProto; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSinkCoordinatorHandler { + private static final Logger LOG = LoggerFactory.getLogger(JniSinkCoordinatorHandler.class); + + public static void runJniSinkCoordinatorThread(long requestRxPtr, long responseTxPtr) { + // For jni.rs + java.lang.Thread.currentThread() + .setContextClassLoader(java.lang.ClassLoader.getSystemClassLoader()); + JniSinkCoordinatorResponseObserver responseObserver = + new JniSinkCoordinatorResponseObserver(responseTxPtr); + SinkCoordinatorStreamObserver sinkCoordinatorStreamObserver = + new SinkCoordinatorStreamObserver(responseObserver); + try { + byte[] requestBytes; + while ((requestBytes = Binding.recvSinkCoordinatorRequestFromChannel(requestRxPtr)) + != null) { + var request = + ConnectorServiceProto.SinkCoordinatorStreamRequest.parseFrom(requestBytes); + sinkCoordinatorStreamObserver.onNext(request); + if (!responseObserver.isSuccess()) { + throw new RuntimeException("fail to sendSinkCoordinatorResponseToChannel"); + } + } + sinkCoordinatorStreamObserver.onCompleted(); + } catch (Throwable t) { + sinkCoordinatorStreamObserver.onError(t); + } + LOG.info("end of runJniSinkCoordinatorThread"); + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorResponseObserver.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorResponseObserver.java new file mode 100644 index 0000000000000..2a04e23f1a0b2 --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkCoordinatorResponseObserver.java @@ -0,0 +1,55 @@ +// Copyright 2023 RisingWave Labs +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import com.risingwave.java.binding.Binding; +import com.risingwave.proto.ConnectorServiceProto; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSinkCoordinatorResponseObserver + implements StreamObserver { + private static final Logger LOG = + LoggerFactory.getLogger(JniSinkCoordinatorResponseObserver.class); + private long responseTxPtr; + + private boolean success; + + public JniSinkCoordinatorResponseObserver(long responseTxPtr) { + this.responseTxPtr = responseTxPtr; + } + + @Override + public void onNext(ConnectorServiceProto.SinkCoordinatorStreamResponse response) { + this.success = + Binding.sendSinkCoordinatorResponseToChannel( + this.responseTxPtr, response.toByteArray()); + } + + @Override + public void onError(Throwable throwable) { + LOG.error("JniSinkCoordinatorHandler onError: ", throwable); + } + + @Override + public void onCompleted() { + LOG.info("JniSinkCoordinatorHandler onCompleted"); + } + + public boolean isSuccess() { + return success; + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkValidationHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkValidationHandler.java new file mode 100644 index 0000000000000..3adfa42658704 --- /dev/null +++ 
b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkValidationHandler.java @@ -0,0 +1,80 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import static com.risingwave.connector.SinkUtils.getConnectorName; + +import com.risingwave.connector.api.TableSchema; +import com.risingwave.connector.api.sink.SinkFactory; +import com.risingwave.proto.ConnectorServiceProto; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSinkValidationHandler { + static final Logger LOG = LoggerFactory.getLogger(SinkValidationHandler.class); + + public static byte[] validate(byte[] validateSinkRequestBytes) + throws com.google.protobuf.InvalidProtocolBufferException { + try { + var request = + ConnectorServiceProto.ValidateSinkRequest.parseFrom(validateSinkRequestBytes); + + // For jni.rs + java.lang.Thread.currentThread() + .setContextClassLoader(java.lang.ClassLoader.getSystemClassLoader()); + + ConnectorServiceProto.SinkParam sinkParam = request.getSinkParam(); + TableSchema tableSchema = TableSchema.fromProto(sinkParam.getTableSchema()); + String connectorName = getConnectorName(request.getSinkParam()); + SinkFactory sinkFactory = SinkUtils.getSinkFactory(connectorName); + sinkFactory.validate( + tableSchema, sinkParam.getPropertiesMap(), sinkParam.getSinkType()); + + 
return ConnectorServiceProto.ValidateSinkResponse.newBuilder().build().toByteArray(); + } catch (IllegalArgumentException e) { + LOG.error("sink validation failed", e); + // Extract useful information from the error thrown by Jackson and convert it into a + // more concise message. + String errorMessage = e.getLocalizedMessage(); + Pattern missingFieldPattern = Pattern.compile("Missing creator property '([^']*)'"); + Pattern unrecognizedFieldPattern = Pattern.compile("Unrecognized field \"([^\"]*)\""); + Matcher missingFieldMatcher = missingFieldPattern.matcher(errorMessage); + Matcher unrecognizedFieldMatcher = unrecognizedFieldPattern.matcher(errorMessage); + if (missingFieldMatcher.find()) { + errorMessage = "missing field `" + missingFieldMatcher.group(1) + "`"; + } else if (unrecognizedFieldMatcher.find()) { + errorMessage = "unknown field `" + unrecognizedFieldMatcher.group(1) + "`"; + } + return ConnectorServiceProto.ValidateSinkResponse.newBuilder() + .setError( + ConnectorServiceProto.ValidationError.newBuilder() + .setErrorMessage(errorMessage) + .build()) + .build() + .toByteArray(); + } catch (Exception e) { + LOG.error("sink validation failed", e); + return ConnectorServiceProto.ValidateSinkResponse.newBuilder() + .setError( + ConnectorServiceProto.ValidationError.newBuilder() + .setErrorMessage(e.getMessage()) + .build()) + .build() + .toByteArray(); + } + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterHandler.java new file mode 100644 index 0000000000000..6e6e27761ba3b --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterHandler.java @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import com.risingwave.java.binding.Binding; +import com.risingwave.proto.ConnectorServiceProto; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSinkWriterHandler { + private static final Logger LOG = LoggerFactory.getLogger(JniSinkWriterHandler.class); + + public static void runJniSinkWriterThread(long requestRxPtr, long responseTxPtr) { + // For jni.rs + java.lang.Thread.currentThread() + .setContextClassLoader(java.lang.ClassLoader.getSystemClassLoader()); + JniSinkWriterResponseObserver responseObserver = + new JniSinkWriterResponseObserver(responseTxPtr); + SinkWriterStreamObserver sinkWriterStreamObserver = + new SinkWriterStreamObserver(responseObserver); + try { + byte[] requestBytes; + while ((requestBytes = Binding.recvSinkWriterRequestFromChannel(requestRxPtr)) + != null) { + var request = ConnectorServiceProto.SinkWriterStreamRequest.parseFrom(requestBytes); + sinkWriterStreamObserver.onNext(request); + if (!responseObserver.isSuccess()) { + throw new RuntimeException("fail to sendSinkWriterResponseToChannel"); + } + } + sinkWriterStreamObserver.onCompleted(); + } catch (Throwable t) { + sinkWriterStreamObserver.onError(t); + } + LOG.info("end of runJniSinkWriterThread"); + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterResponseObserver.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterResponseObserver.java new 
file mode 100644 index 0000000000000..735d85a06c1fc --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JniSinkWriterResponseObserver.java @@ -0,0 +1,53 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import com.risingwave.java.binding.Binding; +import com.risingwave.proto.ConnectorServiceProto; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSinkWriterResponseObserver + implements StreamObserver { + private static final Logger LOG = LoggerFactory.getLogger(JniSinkWriterResponseObserver.class); + private long responseTxPtr; + + private boolean success; + + public JniSinkWriterResponseObserver(long responseTxPtr) { + this.responseTxPtr = responseTxPtr; + } + + @Override + public void onNext(ConnectorServiceProto.SinkWriterStreamResponse response) { + this.success = + Binding.sendSinkWriterResponseToChannel(this.responseTxPtr, response.toByteArray()); + } + + @Override + public void onError(Throwable throwable) { + LOG.error("JniSinkWriterHandler onError: ", throwable); + } + + @Override + public void onCompleted() { + LOG.info("JniSinkWriterHandler onCompleted"); + } + + public boolean isSuccess() { + return success; + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java 
b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java index ab3ac84346fa6..944d529a02d8d 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java @@ -41,8 +41,8 @@ public static SinkFactory getSinkFactory(String sinkName) { return new IcebergSinkFactory(); case "deltalake": return new DeltaLakeSinkFactory(); - case "elasticsearch-7": - return new EsSink7Factory(); + case "elasticsearch": + return new EsSinkFactory(); case "cassandra": return new CassandraFactory(); default: diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkWriterStreamObserver.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkWriterStreamObserver.java index 5caa9e3533e54..1323133519165 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkWriterStreamObserver.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkWriterStreamObserver.java @@ -38,6 +38,8 @@ public class SinkWriterStreamObserver private TableSchema tableSchema; + private boolean finished = false; + private boolean epochStarted; private long currentEpoch; private Long currentBatchId; @@ -58,6 +60,9 @@ public SinkWriterStreamObserver( @Override public void onNext(ConnectorServiceProto.SinkWriterStreamRequest sinkTask) { + if (finished) { + throw new RuntimeException("unexpected onNext call on a finished writer stream"); + } try { if (sinkTask.hasStart()) { if (isInitialized()) { @@ -169,26 +174,27 @@ public void onNext(ConnectorServiceProto.SinkWriterStreamRequest sinkTask) { throw INVALID_ARGUMENT.withDescription("invalid sink task").asRuntimeException(); } } catch (Exception e) { - LOG.error("sink task error: ", e); + 
LOG.error("sink writer error: ", e); + cleanup(); responseObserver.onError(e); } } @Override public void onError(Throwable throwable) { - LOG.error("sink task error: ", throwable); + LOG.error("sink writer finishes with error: ", throwable); cleanup(); - responseObserver.onError(throwable); } @Override public void onCompleted() { - LOG.debug("sink task completed"); + LOG.info("sink writer completed"); cleanup(); responseObserver.onCompleted(); } private void cleanup() { + finished = true; if (sink != null) { sink.drop(); } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java index a8175bb2f738d..ab9a9068fabb9 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java @@ -251,14 +251,12 @@ public CloseableIterator deserialize( static class StreamChunkRowWrapper implements SinkRow { - private boolean isClosed; private final StreamChunkRow inner; private final ValueGetter[] valueGetters; StreamChunkRowWrapper(StreamChunkRow inner, ValueGetter[] valueGetters) { this.inner = inner; this.valueGetters = valueGetters; - this.isClosed = false; } @Override @@ -275,14 +273,6 @@ public Data.Op getOp() { public int size() { return valueGetters.length; } - - @Override - public void close() { - if (!isClosed) { - this.isClosed = true; - inner.close(); - } - } } static class StreamChunkIteratorWrapper implements CloseableIterator { @@ -299,13 +289,6 @@ public StreamChunkIteratorWrapper(StreamChunkIterator iter, ValueGetter[] valueG @Override public void close() { iter.close(); - try { - if (row != null) { - row.close(); - } - } catch (Exception e) 
{ - throw new RuntimeException(e); - } } @Override diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/JniSourceValidateHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/JniSourceValidateHandler.java new file mode 100644 index 0000000000000..a25bf0dee065a --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/JniSourceValidateHandler.java @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.risingwave.connector.source; + +import static com.risingwave.connector.source.SourceValidateHandler.validateResponse; +import static com.risingwave.connector.source.SourceValidateHandler.validateSource; + +import com.risingwave.proto.ConnectorServiceProto; +import io.grpc.StatusRuntimeException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JniSourceValidateHandler { + static final Logger LOG = LoggerFactory.getLogger(JniSourceValidateHandler.class); + + public static byte[] validate(byte[] validateSourceRequestBytes) + throws com.google.protobuf.InvalidProtocolBufferException { + try { + var request = + ConnectorServiceProto.ValidateSourceRequest.parseFrom( + validateSourceRequestBytes); + + // For jni.rs + java.lang.Thread.currentThread() + .setContextClassLoader(java.lang.ClassLoader.getSystemClassLoader()); + validateSource(request); + // validate pass + return ConnectorServiceProto.ValidateSourceResponse.newBuilder().build().toByteArray(); + } catch (StatusRuntimeException e) { + LOG.warn("Source validation failed", e); + return validateResponse(e.getMessage()).toByteArray(); + } catch (Exception e) { + LOG.error("Internal error on source validation", e); + return validateResponse("Internal error: " + e.getMessage()).toByteArray(); + } + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/SourceValidateHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/SourceValidateHandler.java index 2611d1cca676b..18517ebb6dbf3 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/SourceValidateHandler.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/SourceValidateHandler.java @@ -15,10 +15,7 @@ package com.risingwave.connector.source; import com.risingwave.connector.api.TableSchema; -import 
com.risingwave.connector.source.common.DbzConnectorConfig; -import com.risingwave.connector.source.common.MySqlValidator; -import com.risingwave.connector.source.common.PostgresValidator; -import com.risingwave.connector.source.common.ValidatorUtils; +import com.risingwave.connector.source.common.*; import com.risingwave.proto.ConnectorServiceProto; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; @@ -56,7 +53,7 @@ public void handle(ConnectorServiceProto.ValidateSourceRequest request) { } } - private ConnectorServiceProto.ValidateSourceResponse validateResponse(String message) { + public static ConnectorServiceProto.ValidateSourceResponse validateResponse(String message) { return ConnectorServiceProto.ValidateSourceResponse.newBuilder() .setError( ConnectorServiceProto.ValidationError.newBuilder() @@ -65,14 +62,14 @@ private ConnectorServiceProto.ValidateSourceResponse validateResponse(String mes .build(); } - private void ensurePropNotNull(Map props, String name) { + public static void ensurePropNotNull(Map props, String name) { if (!props.containsKey(name)) { throw ValidatorUtils.invalidArgument( String.format("'%s' not found, please check the WITH properties", name)); } } - private void validateSource(ConnectorServiceProto.ValidateSourceRequest request) + public static void validateSource(ConnectorServiceProto.ValidateSourceRequest request) throws Exception { var props = request.getPropertiesMap(); @@ -97,7 +94,7 @@ private void validateSource(ConnectorServiceProto.ValidateSourceRequest request) case CITUS: ensurePropNotNull(props, DbzConnectorConfig.PG_SCHEMA_NAME); - try (var coordinatorValidator = new PostgresValidator(props, tableSchema)) { + try (var coordinatorValidator = new CitusValidator(props, tableSchema)) { coordinatorValidator.validateDistributedTable(); coordinatorValidator.validateTable(); } @@ -110,13 +107,12 @@ private void validateSource(ConnectorServiceProto.ValidateSourceRequest request) for (String workerAddr : 
workerServers) { String[] hostPort = StringUtils.split(workerAddr, ':'); if (hostPort.length != 2) { - throw ValidatorUtils.invalidArgument( - String.format("invalid database.servers")); + throw ValidatorUtils.invalidArgument("invalid database.servers"); } // set HOST for each worker server mutableProps.put(DbzConnectorConfig.HOST, hostPort[0]); mutableProps.put(DbzConnectorConfig.PORT, hostPort[1]); - try (var workerValidator = new PostgresValidator(mutableProps, tableSchema)) { + try (var workerValidator = new CitusValidator(mutableProps, tableSchema)) { workerValidator.validateDbConfig(); workerValidator.validateUserPrivilege(); } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/CitusValidator.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/CitusValidator.java new file mode 100644 index 0000000000000..db9a85b548d36 --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/CitusValidator.java @@ -0,0 +1,34 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.risingwave.connector.source.common; + +import com.risingwave.connector.api.TableSchema; +import java.sql.SQLException; +import java.util.Map; + +public class CitusValidator extends PostgresValidator { + public CitusValidator(Map userProps, TableSchema tableSchema) + throws SQLException { + super(userProps, tableSchema); + } + + @Override + protected void alterPublicationIfNeeded() throws SQLException { + // do nothing for citus worker node, + // since we created a FOR ALL TABLES publication when creating the connector, + // which will replicates changes for all tables in the database, including tables created in + // the future. + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/PostgresValidator.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/PostgresValidator.java index a90ea91de7c01..25aced532112b 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/PostgresValidator.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/PostgresValidator.java @@ -460,7 +460,7 @@ private void validatePublicationPrivileges() throws SQLException { } } - private void alterPublicationIfNeeded() throws SQLException { + protected void alterPublicationIfNeeded() throws SQLException { String alterPublicationSql = String.format( "ALTER PUBLICATION %s ADD TABLE %s", pubName, schemaName + "." 
+ tableName); diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEngineRunner.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEngineRunner.java index e9fef6e869c04..ba9511b02303b 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEngineRunner.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEngineRunner.java @@ -70,6 +70,33 @@ public static CdcEngineRunner newCdcEngineRunner( return runner; } + public static CdcEngineRunner newCdcEngineRunner(DbzConnectorConfig config) { + DbzCdcEngineRunner runner = null; + try { + var sourceId = config.getSourceId(); + var engine = + new DbzCdcEngine( + config.getSourceId(), + config.getResolvedDebeziumProps(), + (success, message, error) -> { + if (!success) { + LOG.error( + "engine#{} terminated with error. message: {}", + sourceId, + message, + error); + } else { + LOG.info("engine#{} stopped normally. 
{}", sourceId, message); + } + }); + + runner = new DbzCdcEngineRunner(engine); + } catch (Exception e) { + LOG.error("failed to create the CDC engine", e); + } + return runner; + } + /** Start to run the cdc engine */ public void start() { if (isRunning()) { diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEventConsumer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEventConsumer.java index 9ec96c90f41f2..9f3fc3d17e39b 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEventConsumer.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/DbzCdcEventConsumer.java @@ -72,25 +72,15 @@ public void handleBatch( List> events, DebeziumEngine.RecordCommitter> committer) throws InterruptedException { - var builder = GetEventStreamResponse.newBuilder(); + var respBuilder = GetEventStreamResponse.newBuilder(); for (ChangeEvent event : events) { var record = event.value(); - if (isHeartbeatEvent(record)) { - // skip heartbeat events - continue; - } - // ignore null record - if (record.value() == null) { - committer.markProcessed(event); - continue; - } - byte[] payload = - converter.fromConnectData(record.topic(), record.valueSchema(), record.value()); - - // serialize the offset to a JSON, so that kernel doesn't need to - // aware the layout of it + boolean isHeartbeat = isHeartbeatEvent(record); DebeziumOffset offset = - new DebeziumOffset(record.sourcePartition(), record.sourceOffset()); + new DebeziumOffset( + record.sourcePartition(), record.sourceOffset(), isHeartbeat); + // serialize the offset to a JSON, so that kernel doesn't need to + // aware its layout String offsetStr = ""; try { byte[] serialized = DebeziumOffsetSerializer.INSTANCE.serialize(offset); @@ -98,19 +88,42 @@ var record = event.value(); } catch 
(IOException e) { LOG.warn("failed to serialize debezium offset", e); } - var message = + + var msgBuilder = CdcMessage.newBuilder() .setOffset(offsetStr) - .setPartition(String.valueOf(sourceId)) - .setPayload(new String(payload, StandardCharsets.UTF_8)) - .build(); - LOG.debug("record => {}", message.getPayload()); - builder.addEvents(message); - committer.markProcessed(event); + .setPartition(String.valueOf(sourceId)); + + if (isHeartbeat) { + var message = msgBuilder.build(); + LOG.debug("heartbeat => {}", message.getOffset()); + respBuilder.addEvents(message); + } else { + // ignore null record + if (record.value() == null) { + committer.markProcessed(event); + continue; + } + byte[] payload = + converter.fromConnectData( + record.topic(), record.valueSchema(), record.value()); + + msgBuilder.setPayload(new String(payload, StandardCharsets.UTF_8)).build(); + var message = msgBuilder.build(); + LOG.debug("record => {}", message.getPayload()); + + respBuilder.addEvents(message); + committer.markProcessed(event); + } } - builder.setSourceId(sourceId); - var response = builder.build(); - outputChannel.put(response); + + // skip empty batch + if (respBuilder.getEventsCount() > 0) { + respBuilder.setSourceId(sourceId); + var response = respBuilder.build(); + outputChannel.put(response); + } + committer.markBatchFinished(); } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/JniDbzSourceHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/JniDbzSourceHandler.java new file mode 100644 index 0000000000000..73e8875f4581c --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/core/JniDbzSourceHandler.java @@ -0,0 +1,107 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector.source.core; + +import com.risingwave.connector.api.source.SourceTypeE; +import com.risingwave.connector.source.common.DbzConnectorConfig; +import com.risingwave.java.binding.Binding; +import com.risingwave.metrics.ConnectorNodeMetrics; +import com.risingwave.proto.ConnectorServiceProto; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** handler for starting a debezium source connectors for jni */ +public class JniDbzSourceHandler { + static final Logger LOG = LoggerFactory.getLogger(DbzSourceHandler.class); + + private final DbzConnectorConfig config; + + public JniDbzSourceHandler(DbzConnectorConfig config) { + this.config = config; + } + + public static void runJniDbzSourceThread(byte[] getEventStreamRequestBytes, long channelPtr) + throws com.google.protobuf.InvalidProtocolBufferException { + var request = + ConnectorServiceProto.GetEventStreamRequest.parseFrom(getEventStreamRequestBytes); + + // For jni.rs + java.lang.Thread.currentThread() + .setContextClassLoader(java.lang.ClassLoader.getSystemClassLoader()); + // userProps extracted from request, underlying implementation is UnmodifiableMap + Map mutableUserProps = new HashMap<>(request.getPropertiesMap()); + mutableUserProps.put("source.id", Long.toString(request.getSourceId())); + var config = + new DbzConnectorConfig( + SourceTypeE.valueOf(request.getSourceType()), + request.getSourceId(), + request.getStartOffset(), + mutableUserProps, + 
request.getSnapshotDone()); + JniDbzSourceHandler handler = new JniDbzSourceHandler(config); + handler.start(channelPtr); + } + + public void start(long channelPtr) { + var runner = DbzCdcEngineRunner.newCdcEngineRunner(config); + if (runner == null) { + return; + } + + try { + // Start the engine + runner.start(); + LOG.info("Start consuming events of table {}", config.getSourceId()); + + while (runner.isRunning()) { + // check whether the send queue has room for new messages + // Thread will block on the channel to get output from engine + var resp = runner.getEngine().getOutputChannel().poll(500, TimeUnit.MILLISECONDS); + boolean success; + if (resp != null) { + ConnectorNodeMetrics.incSourceRowsReceived( + config.getSourceType().toString(), + String.valueOf(config.getSourceId()), + resp.getEventsCount()); + LOG.debug( + "Engine#{}: emit one chunk {} events to network ", + config.getSourceId(), + resp.getEventsCount()); + success = Binding.sendCdcSourceMsgToChannel(channelPtr, resp.toByteArray()); + } else { + // If resp is null means just check whether channel is closed. 
+ success = Binding.sendCdcSourceMsgToChannel(channelPtr, null); + } + if (!success) { + LOG.info( + "Engine#{}: JNI sender broken detected, stop the engine", + config.getSourceId()); + runner.stop(); + return; + } + } + } catch (Throwable t) { + LOG.error("Cdc engine failed.", t); + try { + runner.stop(); + } catch (Exception e) { + LOG.warn("Failed to stop Engine#{}", config.getSourceId(), e); + } + } + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties b/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties index b99c6dcf5971b..cc64723b66b60 100644 --- a/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties +++ b/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties @@ -17,6 +17,12 @@ table.include.list=${database.name}.${table.name} include.schema.changes=${debezium.include.schema.changes:-false} database.server.id=${server.id} +# set connector timezone to UTC(+00:00) +database.connectionTimeZone=+00:00 + +# default heartbeat interval 60 seconds +heartbeat.interval.ms=${debezium.heartbeat.interval.ms:-60000} +heartbeat.topics.prefix=${debezium.heartbeat.topics.prefix:-RW_CDC_HeartBeat_} name=${hostname}:${port}:${database.name}.${table.name} provide.transaction.metadata=${transactional:-false} diff --git a/java/connector-node/risingwave-connector-test/pom.xml b/java/connector-node/risingwave-connector-test/pom.xml index 7f8f6f1bc49cc..a01049746be8e 100644 --- a/java/connector-node/risingwave-connector-test/pom.xml +++ b/java/connector-node/risingwave-connector-test/pom.xml @@ -1,10 +1,11 @@ - + - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 @@ -19,22 +20,22 @@ - com.risingwave.java + com.risingwave connector-api test - com.risingwave.java + com.risingwave risingwave-sink-deltalake test - com.risingwave.java + com.risingwave 
risingwave-sink-iceberg test - com.risingwave.java + com.risingwave s3-common test @@ -155,29 +156,29 @@ - com.risingwave.java + com.risingwave risingwave-source-cdc test - com.risingwave.java + com.risingwave risingwave-connector-service test - com.risingwave.java + com.risingwave risingwave-sink-jdbc test - com.risingwave.java + com.risingwave risingwave-sink-es-7 test - com.risingwave.java + com.risingwave risingwave-sink-cassandra test - + \ No newline at end of file diff --git a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSink7Test.java b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSinkTest.java similarity index 94% rename from java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSink7Test.java rename to java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSinkTest.java index e3024ff09b26e..af0ea7190f946 100644 --- a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSink7Test.java +++ b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/elasticsearch/EsSinkTest.java @@ -19,8 +19,8 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Lists; -import com.risingwave.connector.EsSink7; -import com.risingwave.connector.EsSink7Config; +import com.risingwave.connector.EsSink; +import com.risingwave.connector.EsSinkConfig; import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.ArraySinkRow; import com.risingwave.proto.Data; @@ -39,7 +39,7 @@ import org.junit.Test; import org.testcontainers.elasticsearch.ElasticsearchContainer; -public class EsSink7Test { +public class EsSinkTest { static TableSchema getTestTableSchema() { return new TableSchema( @@ -52,9 +52,9 @@ static TableSchema 
getTestTableSchema() { public void testEsSink(ElasticsearchContainer container, String username, String password) throws IOException { - EsSink7 sink = - new EsSink7( - new EsSink7Config(container.getHttpHostAddress(), "test") + EsSink sink = + new EsSink( + new EsSinkConfig(container.getHttpHostAddress(), "test") .withDelimiter("$") .withUsername(username) .withPassword(password), diff --git a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/jdbc/JDBCSinkTest.java b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/jdbc/JDBCSinkTest.java index f38cd83b10e7c..da9b9d866583b 100644 --- a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/jdbc/JDBCSinkTest.java +++ b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/sink/jdbc/JDBCSinkTest.java @@ -74,7 +74,7 @@ static TableSchema getTestTableSchema() { static void testJDBCSync(JdbcDatabaseContainer container, TestType testType) throws SQLException { - String tableName = "test"; + String tableName = "test2"; createMockTable(container.getJdbcUrl(), tableName, testType); JDBCSink sink = new JDBCSink( @@ -97,12 +97,13 @@ static void testJDBCSync(JdbcDatabaseContainer container, TestType testType) sink.sync(); Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT * FROM test"); - int count; - for (count = 0; rs.next(); ) { - count++; + try (var rs = stmt.executeQuery(String.format("SELECT * FROM %s", tableName))) { + int count; + for (count = 0; rs.next(); ) { + count++; + } + assertEquals(1, count); } - assertEquals(1, count); sink.write( Iterators.forArray( @@ -116,12 +117,14 @@ static void testJDBCSync(JdbcDatabaseContainer container, TestType testType) "{\"key\": \"password\", \"value\": \"Singularity123\"}", "I want to sleep".getBytes()))); sink.sync(); - stmt = conn.createStatement(); - rs = stmt.executeQuery("SELECT * FROM test"); - 
for (count = 0; rs.next(); ) { - count++; + try (var rs = stmt.executeQuery(String.format("SELECT * FROM %s", tableName))) { + int count; + for (count = 0; rs.next(); ) { + count++; + } + assertEquals(2, count); } - assertEquals(2, count); + stmt.close(); sink.sync(); sink.drop(); @@ -129,7 +132,7 @@ static void testJDBCSync(JdbcDatabaseContainer container, TestType testType) static void testJDBCWrite(JdbcDatabaseContainer container, TestType testType) throws SQLException { - String tableName = "test"; + String tableName = "test1"; createMockTable(container.getJdbcUrl(), tableName, testType); JDBCSink sink = @@ -138,6 +141,7 @@ static void testJDBCWrite(JdbcDatabaseContainer container, TestType testType) getTestTableSchema()); assertEquals(tableName, sink.getTableName()); Connection conn = sink.getConn(); + Statement stmt = conn.createStatement(); sink.write( Iterators.forArray( @@ -158,7 +162,16 @@ static void testJDBCWrite(JdbcDatabaseContainer container, TestType testType) new Time(1000000000), new Timestamp(1000000000), "{\"key\": \"password\", \"value\": \"Singularity123\"}", - "I want to sleep".getBytes()), + "I want to sleep".getBytes()))); + + // chunk will commit after sink.write() + try (var rs = stmt.executeQuery(String.format("SELECT COUNT(*) FROM %s", tableName))) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + } + + sink.write( + Iterators.forArray( new ArraySinkRow( Op.UPDATE_DELETE, 1, @@ -186,22 +199,22 @@ static void testJDBCWrite(JdbcDatabaseContainer container, TestType testType) new Timestamp(1000000000), "{\"key\": \"password\", \"value\": \"Singularity123\"}", "I want to sleep".getBytes()))); - sink.sync(); - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT * FROM test"); - rs.next(); + try (var rs = stmt.executeQuery(String.format("SELECT * FROM %s", tableName))) { + assertTrue(rs.next()); - // check if rows are inserted - assertEquals(1, rs.getInt(1)); - assertEquals("Clare", 
rs.getString(2)); - assertEquals(new Date(2000000000).toString(), rs.getDate(3).toString()); - assertEquals(new Time(2000000000).toString(), rs.getTime(4).toString()); - assertEquals(new Timestamp(2000000000), rs.getTimestamp(5)); - assertEquals( - "{\"key\": \"password\", \"value\": \"Singularity123123123123\"}", rs.getString(6)); - assertEquals("I want to eat", new String(rs.getBytes(7))); - assertFalse(rs.next()); + // check if rows are inserted + assertEquals(1, rs.getInt(1)); + assertEquals("Clare", rs.getString(2)); + assertEquals(new Date(2000000000).toString(), rs.getDate(3).toString()); + assertEquals(new Time(2000000000).toString(), rs.getTime(4).toString()); + assertEquals(new Timestamp(2000000000), rs.getTimestamp(5)); + assertEquals( + "{\"key\": \"password\", \"value\": \"Singularity123123123123\"}", + rs.getString(6)); + assertEquals("I want to eat", new String(rs.getBytes(7))); + assertFalse(rs.next()); + } sink.sync(); stmt.close(); @@ -209,7 +222,7 @@ static void testJDBCWrite(JdbcDatabaseContainer container, TestType testType) static void testJDBCDrop(JdbcDatabaseContainer container, TestType testType) throws SQLException { - String tableName = "test"; + String tableName = "test3"; createMockTable(container.getJdbcUrl(), tableName, testType); JDBCSink sink = @@ -237,8 +250,8 @@ public void testPostgres() throws SQLException { .withUrlParam("user", "postgres") .withUrlParam("password", "password"); pg.start(); - testJDBCSync(pg, TestType.TestPg); testJDBCWrite(pg, TestType.TestPg); + testJDBCSync(pg, TestType.TestPg); testJDBCDrop(pg, TestType.TestPg); pg.stop(); } @@ -254,8 +267,8 @@ public void testMySQL() throws SQLException { .withUrlParam("user", "postgres") .withUrlParam("password", "password"); mysql.start(); - testJDBCSync(mysql, TestType.TestMySQL); testJDBCWrite(mysql, TestType.TestMySQL); + testJDBCSync(mysql, TestType.TestMySQL); testJDBCDrop(mysql, TestType.TestMySQL); mysql.stop(); } diff --git 
a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/MySQLSourceTest.java b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/MySQLSourceTest.java index 294ebffcc70e8..92028255d28e3 100644 --- a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/MySQLSourceTest.java +++ b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/MySQLSourceTest.java @@ -124,10 +124,12 @@ public void testLines() throws InterruptedException, SQLException { int count = 0; while (eventStream.hasNext()) { List messages = eventStream.next().getEventsList(); - for (CdcMessage ignored : messages) { - count++; + for (CdcMessage msg : messages) { + if (!msg.getPayload().isBlank()) { + count++; + } } - if (count == 10000) { + if (count >= 10000) { return count; } } diff --git a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/PostgresSourceTest.java b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/PostgresSourceTest.java index b673f533948ee..4fff4e7f50ad0 100644 --- a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/PostgresSourceTest.java +++ b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/PostgresSourceTest.java @@ -130,10 +130,12 @@ public void testLines() throws Exception { while (eventStream.hasNext()) { List messages = eventStream.next().getEventsList(); - for (ConnectorServiceProto.CdcMessage ignored : messages) { - count++; + for (ConnectorServiceProto.CdcMessage msg : messages) { + if (!msg.getPayload().isBlank()) { + count++; + } } - if (count == 10000) { + if (count >= 10000) { return count; } } diff --git a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/SourceTestClient.java 
b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/SourceTestClient.java index 738d0f850a39b..359746bc90f77 100644 --- a/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/SourceTestClient.java +++ b/java/connector-node/risingwave-connector-test/src/test/java/com/risingwave/connector/source/SourceTestClient.java @@ -41,7 +41,7 @@ public class SourceTestClient { static final Logger LOG = LoggerFactory.getLogger(SourceTestClient.class.getName()); // default port for connector service - static final int DEFAULT_PORT = 50051; + static final int DEFAULT_PORT = 60051; private final ConnectorServiceGrpc.ConnectorServiceBlockingStub blockingStub; public Properties sqlStmts = new Properties(); diff --git a/java/connector-node/risingwave-sink-cassandra/pom.xml b/java/connector-node/risingwave-sink-cassandra/pom.xml index e51faa9691cf9..82119156100cd 100644 --- a/java/connector-node/risingwave-sink-cassandra/pom.xml +++ b/java/connector-node/risingwave-sink-cassandra/pom.xml @@ -1,25 +1,30 @@ - + - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 risingwave-sink-cassandra - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-sink-cassandra + + true + + - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave connector-api @@ -57,4 +62,4 @@ - \ No newline at end of file + diff --git a/java/connector-node/risingwave-sink-deltalake/pom.xml b/java/connector-node/risingwave-sink-deltalake/pom.xml index cfa73c652fa5b..b9fc1c175615e 100644 --- a/java/connector-node/risingwave-sink-deltalake/pom.xml +++ b/java/connector-node/risingwave-sink-deltalake/pom.xml @@ -3,15 +3,15 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - com.risingwave.java - java-parent - 1.0-SNAPSHOT + com.risingwave + 
risingwave-java-root + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 risingwave-sink-deltalake - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-sink-deltalake @@ -19,24 +19,25 @@ 11 11 1.12.3 + true - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave s3-common provided - com.risingwave.java + com.risingwave common-utils - com.risingwave.java + com.risingwave connector-api diff --git a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java index 1b3a7c28d97a9..413edeb10df81 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java +++ b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java @@ -75,27 +75,24 @@ public void write(Iterator rows) { } } while (rows.hasNext()) { - try (SinkRow row = rows.next()) { - switch (row.getOp()) { - case INSERT: - GenericRecord record = new GenericData.Record(this.sinkSchema); - for (int i = 0; i < this.sinkSchema.getFields().size(); i++) { - record.put(i, row.get(i)); - } - try { - this.parquetWriter.write(record); - this.numOutputRows += 1; - } catch (IOException ioException) { - throw INTERNAL.withCause(ioException).asRuntimeException(); - } - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); - } - } catch (Exception e) { - throw new RuntimeException(e); + SinkRow row = rows.next(); + switch (row.getOp()) { + case INSERT: + GenericRecord record = new GenericData.Record(this.sinkSchema); + for (int i = 0; i < this.sinkSchema.getFields().size(); i++) { + record.put(i, row.get(i)); + } + try { + this.parquetWriter.write(record); + this.numOutputRows += 1; + } catch (IOException ioException) { + throw INTERNAL.withCause(ioException).asRuntimeException(); + } + break; + default: + throw 
UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); } } } diff --git a/java/connector-node/risingwave-sink-es-7/pom.xml b/java/connector-node/risingwave-sink-es-7/pom.xml index 04694ed98bb04..ca8a264e53a94 100644 --- a/java/connector-node/risingwave-sink-es-7/pom.xml +++ b/java/connector-node/risingwave-sink-es-7/pom.xml @@ -1,25 +1,30 @@ - - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 risingwave-sink-es-7 - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-sink-es-7 + + true + + - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave connector-api @@ -60,4 +65,4 @@ - \ No newline at end of file + diff --git a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7.java b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink.java similarity index 79% rename from java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7.java rename to java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink.java index 44f1610ceaaba..f9c266f0af117 100644 --- a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7.java +++ b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink.java @@ -14,6 +14,10 @@ package com.risingwave.connector; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.ObjectMapper; import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkRow; import com.risingwave.connector.api.sink.SinkWriterBase; @@ -36,6 +40,7 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import 
org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.RestHighLevelClientBuilder; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; @@ -55,17 +60,17 @@ * * 4. bulkprocessor and high-level-client are deprecated in es 8 java api. */ -public class EsSink7 extends SinkWriterBase { - private static final Logger LOG = LoggerFactory.getLogger(EsSink7.class); +public class EsSink extends SinkWriterBase { + private static final Logger LOG = LoggerFactory.getLogger(EsSink.class); private static final String ERROR_REPORT_TEMPLATE = "Error when exec %s, message %s"; - private final EsSink7Config config; + private final EsSinkConfig config; private final BulkProcessor bulkProcessor; private final RestHighLevelClient client; // For bulk listener private final List primaryKeyIndexes; - public EsSink7(EsSink7Config config, TableSchema tableSchema) { + public EsSink(EsSinkConfig config, TableSchema tableSchema) { super(tableSchema); HttpHost host; try { @@ -75,9 +80,14 @@ public EsSink7(EsSink7Config config, TableSchema tableSchema) { } this.config = config; + + // ApiCompatibilityMode is enabled to ensure the client can talk to newer version es sever. this.client = - new RestHighLevelClient( - configureRestClientBuilder(RestClient.builder(host), config)); + new RestHighLevelClientBuilder( + configureRestClientBuilder(RestClient.builder(host), config) + .build()) + .setApiCompatibilityMode(true) + .build(); // Test connection try { boolean isConnected = this.client.ping(RequestOptions.DEFAULT); @@ -98,7 +108,7 @@ public EsSink7(EsSink7Config config, TableSchema tableSchema) { } private static RestClientBuilder configureRestClientBuilder( - RestClientBuilder builder, EsSink7Config config) { + RestClientBuilder builder, EsSinkConfig config) { // Possible config: // 1. Connection path prefix // 2. 
Username and password @@ -116,7 +126,7 @@ private static RestClientBuilder configureRestClientBuilder( } private BulkProcessor.Builder applyBulkConfig( - RestHighLevelClient client, EsSink7Config config, BulkProcessor.Listener listener) { + RestHighLevelClient client, EsSinkConfig config, BulkProcessor.Listener listener) { BulkProcessor.Builder builder = BulkProcessor.builder( (BulkRequestConsumerFactory) @@ -177,11 +187,36 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) * * @param row * @return Map from Field name to Value + * @throws JsonProcessingException + * @throws JsonMappingException */ - private Map buildDoc(SinkRow row) { + private Map buildDoc(SinkRow row) + throws JsonMappingException, JsonProcessingException { Map doc = new HashMap(); - for (int i = 0; i < getTableSchema().getNumColumns(); i++) { - doc.put(getTableSchema().getColumnDesc(i).getName(), row.get(i)); + var tableSchema = getTableSchema(); + var columnDescs = tableSchema.getColumnDescs(); + for (int i = 0; i < row.size(); i++) { + var type = columnDescs.get(i).getDataType().getTypeName(); + Object col = row.get(i); + switch (type) { + case DATE: + // es client doesn't natively support java.sql.Timestamp/Time/Date + // so we need to convert Date type into a string as suggested in + // https://github.com/elastic/elasticsearch/issues/31377#issuecomment-398102292 + col = col.toString(); + break; + case JSONB: + ObjectMapper mapper = new ObjectMapper(); + col = + mapper.readValue( + (String) col, new TypeReference>() {}); + break; + default: + break; + } + if (col instanceof Date) {} + + doc.put(getTableSchema().getColumnDesc(i).getName(), col); } return doc; } @@ -206,7 +241,7 @@ private String buildId(SinkRow row) { return id; } - private void processUpsert(SinkRow row) { + private void processUpsert(SinkRow row) throws JsonMappingException, JsonProcessingException { Map doc = buildDoc(row); final String key = buildId(row); @@ -221,7 +256,7 @@ private void 
processDelete(SinkRow row) { bulkProcessor.add(deleteRequest); } - private void writeRow(SinkRow row) { + private void writeRow(SinkRow row) throws JsonMappingException, JsonProcessingException { switch (row.getOp()) { case INSERT: case UPDATE_INSERT: @@ -241,10 +276,11 @@ private void writeRow(SinkRow row) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - try (SinkRow row = rows.next()) { + SinkRow row = rows.next(); + try { writeRow(row); - } catch (Exception e) { - throw new RuntimeException(e); + } catch (Exception ex) { + throw new RuntimeException(ex); } } } diff --git a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Config.java b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkConfig.java similarity index 87% rename from java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Config.java rename to java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkConfig.java index 6d45b3ee52d87..e053dfed77b63 100644 --- a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Config.java +++ b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkConfig.java @@ -20,7 +20,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.risingwave.connector.api.sink.CommonSinkConfig; -public class EsSink7Config extends CommonSinkConfig { +public class EsSinkConfig extends CommonSinkConfig { /** Required */ private String url; @@ -38,7 +38,7 @@ public class EsSink7Config extends CommonSinkConfig { private String password; @JsonCreator - public EsSink7Config( + public EsSinkConfig( @JsonProperty(value = "url") String url, @JsonProperty(value = "index") String index) { this.url = url; this.index = index; @@ -64,17 +64,17 @@ public String getPassword() { return password; } - public EsSink7Config withDelimiter(String delimiter) { + public 
EsSinkConfig withDelimiter(String delimiter) { this.delimiter = delimiter; return this; } - public EsSink7Config withUsername(String username) { + public EsSinkConfig withUsername(String username) { this.username = username; return this; } - public EsSink7Config withPassword(String password) { + public EsSinkConfig withPassword(String password) { this.password = password; return this; } diff --git a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Factory.java b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkFactory.java similarity index 91% rename from java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Factory.java rename to java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkFactory.java index 4bc6fa6cc1990..a31826a45a5ab 100644 --- a/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSink7Factory.java +++ b/java/connector-node/risingwave-sink-es-7/src/main/java/com/risingwave/connector/EsSinkFactory.java @@ -36,13 +36,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class EsSink7Factory implements SinkFactory { - private static final Logger LOG = LoggerFactory.getLogger(EsSink7Factory.class); +public class EsSinkFactory implements SinkFactory { + private static final Logger LOG = LoggerFactory.getLogger(EsSinkFactory.class); public SinkWriter createWriter(TableSchema tableSchema, Map tableProperties) { ObjectMapper mapper = new ObjectMapper(); - EsSink7Config config = mapper.convertValue(tableProperties, EsSink7Config.class); - return new SinkWriterV1.Adapter(new EsSink7(config, tableSchema)); + EsSinkConfig config = mapper.convertValue(tableProperties, EsSinkConfig.class); + return new SinkWriterV1.Adapter(new EsSink(config, tableSchema)); } @Override @@ -52,7 +52,7 @@ public void validate( Catalog.SinkType sinkType) { ObjectMapper mapper = new ObjectMapper(); 
mapper.configure(DeserializationFeature.FAIL_ON_MISSING_CREATOR_PROPERTIES, true); - EsSink7Config config = mapper.convertValue(tableProperties, EsSink7Config.class); + EsSinkConfig config = mapper.convertValue(tableProperties, EsSinkConfig.class); // 1. check url HttpHost host; diff --git a/java/connector-node/risingwave-sink-iceberg/pom.xml b/java/connector-node/risingwave-sink-iceberg/pom.xml index 818144377656d..d89d393ea7392 100644 --- a/java/connector-node/risingwave-sink-iceberg/pom.xml +++ b/java/connector-node/risingwave-sink-iceberg/pom.xml @@ -3,15 +3,15 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml 4.0.0 risingwave-sink-iceberg - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-sink-iceberg @@ -19,23 +19,24 @@ 1.0.0 11 11 + true - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave common-utils - com.risingwave.java + com.risingwave connector-api - com.risingwave.java + com.risingwave s3-common provided diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/AppendOnlyIcebergSinkWriter.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/AppendOnlyIcebergSinkWriter.java index 6a6aad0a460e0..6b60eedd23d37 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/AppendOnlyIcebergSinkWriter.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/AppendOnlyIcebergSinkWriter.java @@ -55,62 +55,57 @@ public AppendOnlyIcebergSinkWriter( @Override public void write(Iterator rows) { while (rows.hasNext()) { - try (SinkRow row = rows.next()) { - switch (row.getOp()) { - case INSERT: - Record record = GenericRecord.create(rowSchema); - if (row.size() != 
tableSchema.getColumnNames().length) { - throw INTERNAL.withDescription("row values do not match table schema") + SinkRow row = rows.next(); + switch (row.getOp()) { + case INSERT: + Record record = GenericRecord.create(rowSchema); + if (row.size() != tableSchema.getColumnNames().length) { + throw INTERNAL.withDescription("row values do not match table schema") + .asRuntimeException(); + } + for (int i = 0; i < rowSchema.columns().size(); i++) { + record.set(i, row.get(i)); + } + PartitionKey partitionKey = + new PartitionKey(icebergTable.spec(), icebergTable.schema()); + partitionKey.partition(record); + DataWriter dataWriter; + if (dataWriterMap.containsKey(partitionKey)) { + dataWriter = dataWriterMap.get(partitionKey); + } else { + try { + String filename = fileFormat.addExtension(UUID.randomUUID().toString()); + OutputFile outputFile = + icebergTable + .io() + .newOutputFile( + icebergTable.location() + + "/data/" + + icebergTable + .spec() + .partitionToPath(partitionKey) + + "/" + + filename); + dataWriter = + Parquet.writeData(outputFile) + .schema(rowSchema) + .withSpec(icebergTable.spec()) + .withPartition(partitionKey) + .createWriterFunc(GenericParquetWriter::buildWriter) + .overwrite() + .build(); + } catch (Exception e) { + throw INTERNAL.withDescription("failed to create dataWriter") .asRuntimeException(); } - for (int i = 0; i < rowSchema.columns().size(); i++) { - record.set(i, row.get(i)); - } - PartitionKey partitionKey = - new PartitionKey(icebergTable.spec(), icebergTable.schema()); - partitionKey.partition(record); - DataWriter dataWriter; - if (dataWriterMap.containsKey(partitionKey)) { - dataWriter = dataWriterMap.get(partitionKey); - } else { - try { - String filename = - fileFormat.addExtension(UUID.randomUUID().toString()); - OutputFile outputFile = - icebergTable - .io() - .newOutputFile( - icebergTable.location() - + "/data/" - + icebergTable - .spec() - .partitionToPath( - partitionKey) - + "/" - + filename); - dataWriter = - 
Parquet.writeData(outputFile) - .schema(rowSchema) - .withSpec(icebergTable.spec()) - .withPartition(partitionKey) - .createWriterFunc(GenericParquetWriter::buildWriter) - .overwrite() - .build(); - } catch (Exception e) { - throw INTERNAL.withDescription("failed to create dataWriter") - .asRuntimeException(); - } - dataWriterMap.put(partitionKey, dataWriter); - } - dataWriter.write(record); - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); - } - } catch (Exception e) { - throw new RuntimeException(e); + dataWriterMap.put(partitionKey, dataWriter); + } + dataWriter.write(record); + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); } } } diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSinkWriter.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSinkWriter.java index 10fca804acf64..e1d649f028bf8 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSinkWriter.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSinkWriter.java @@ -142,57 +142,52 @@ private List> getKeyFromRow(SinkRow row) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - try (SinkRow row = rows.next()) { - if (row.size() != tableSchema.getColumnNames().length) { - throw Status.FAILED_PRECONDITION - .withDescription("row values do not match table schema") - .asRuntimeException(); - } - Record record = newRecord(rowSchema, row); - PartitionKey partitionKey = - new PartitionKey(icebergTable.spec(), icebergTable.schema()); - partitionKey.partition(record); - SinkRowMap sinkRowMap; - if (sinkRowMapByPartition.containsKey(partitionKey)) { - sinkRowMap = sinkRowMapByPartition.get(partitionKey); - } else { - 
sinkRowMap = new SinkRowMap(); - sinkRowMapByPartition.put(partitionKey, sinkRowMap); - } - switch (row.getOp()) { - case INSERT: - sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); - break; - case DELETE: - sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); - break; - case UPDATE_DELETE: - if (updateBufferExists) { - throw Status.FAILED_PRECONDITION - .withDescription( - "an UPDATE_INSERT should precede an UPDATE_DELETE") - .asRuntimeException(); - } - sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); - updateBufferExists = true; - break; - case UPDATE_INSERT: - if (!updateBufferExists) { - throw Status.FAILED_PRECONDITION - .withDescription( - "an UPDATE_INSERT should precede an UPDATE_DELETE") - .asRuntimeException(); - } - sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); - updateBufferExists = false; - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) + SinkRow row = rows.next(); + if (row.size() != tableSchema.getColumnNames().length) { + throw Status.FAILED_PRECONDITION + .withDescription("row values do not match table schema") + .asRuntimeException(); + } + Record record = newRecord(rowSchema, row); + PartitionKey partitionKey = + new PartitionKey(icebergTable.spec(), icebergTable.schema()); + partitionKey.partition(record); + SinkRowMap sinkRowMap; + if (sinkRowMapByPartition.containsKey(partitionKey)) { + sinkRowMap = sinkRowMapByPartition.get(partitionKey); + } else { + sinkRowMap = new SinkRowMap(); + sinkRowMapByPartition.put(partitionKey, sinkRowMap); + } + switch (row.getOp()) { + case INSERT: + sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); + break; + case DELETE: + sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); + break; + case UPDATE_DELETE: + if (updateBufferExists) { + throw Status.FAILED_PRECONDITION + .withDescription("an UPDATE_INSERT should precede an UPDATE_DELETE") 
.asRuntimeException(); - } - } catch (Exception e) { - throw new RuntimeException(e); + } + sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); + updateBufferExists = true; + break; + case UPDATE_INSERT: + if (!updateBufferExists) { + throw Status.FAILED_PRECONDITION + .withDescription("an UPDATE_INSERT should precede an UPDATE_DELETE") + .asRuntimeException(); + } + sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); + updateBufferExists = false; + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); } } } diff --git a/java/connector-node/risingwave-sink-jdbc/pom.xml b/java/connector-node/risingwave-sink-jdbc/pom.xml index eda76fbdd59af..bcdd8fd67fbae 100644 --- a/java/connector-node/risingwave-sink-jdbc/pom.xml +++ b/java/connector-node/risingwave-sink-jdbc/pom.xml @@ -1,25 +1,30 @@ - + 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml risingwave-sink-jdbc - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-sink-jdbc + + true + + - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave connector-api diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java index 216503b8d824b..ea8429536c03c 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java @@ -18,13 +18,10 @@ import com.risingwave.connector.api.sink.SinkRow; import com.risingwave.connector.api.sink.SinkWriterBase; import com.risingwave.connector.jdbc.JdbcDialect; -import com.risingwave.connector.jdbc.JdbcDialectFactory; import com.risingwave.proto.Data; import io.grpc.Status; import java.sql.*; -import 
java.util.ArrayList; -import java.util.Iterator; -import java.util.List; +import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,13 +32,16 @@ public class JDBCSink extends SinkWriterBase { private final JDBCSinkConfig config; private final Connection conn; private final List pkColumnNames; + public static final String JDBC_COLUMN_NAME_KEY = "COLUMN_NAME"; + public static final String JDBC_DATA_TYPE_KEY = "DATA_TYPE"; private PreparedStatement insertPreparedStmt; private PreparedStatement upsertPreparedStmt; private PreparedStatement deletePreparedStmt; private boolean updateFlag = false; + private static final Logger LOG = LoggerFactory.getLogger(JDBCSink.class); public JDBCSink(JDBCSinkConfig config, TableSchema tableSchema) { @@ -49,19 +49,40 @@ public JDBCSink(JDBCSinkConfig config, TableSchema tableSchema) { var jdbcUrl = config.getJdbcUrl().toLowerCase(); var factory = JdbcUtils.getDialectFactory(jdbcUrl); - this.jdbcDialect = - factory.map(JdbcDialectFactory::create) - .orElseThrow( - () -> - Status.INVALID_ARGUMENT - .withDescription("Unsupported jdbc url: " + jdbcUrl) - .asRuntimeException()); this.config = config; try { this.conn = DriverManager.getConnection(config.getJdbcUrl()); - this.conn.setAutoCommit(false); this.pkColumnNames = getPkColumnNames(conn, config.getTableName(), config.getSchemaName()); + // column name -> java.sql.Types + Map columnTypeMapping = + getColumnTypeMapping(conn, config.getTableName(), config.getSchemaName()); + + // create an array that each slot corresponding to each column in TableSchema + var columnSqlTypes = new int[tableSchema.getNumColumns()]; + for (int columnIdx = 0; columnIdx < tableSchema.getNumColumns(); columnIdx++) { + var columnName = tableSchema.getColumnNames()[columnIdx]; + columnSqlTypes[columnIdx] = columnTypeMapping.get(columnName); + } + LOG.info("columnSqlTypes: {}", Arrays.toString(columnSqlTypes)); + + if (factory.isPresent()) { + this.jdbcDialect = 
factory.get().create(columnSqlTypes); + } else { + throw Status.INVALID_ARGUMENT + .withDescription("Unsupported jdbc url: " + jdbcUrl) + .asRuntimeException(); + } + + // disable auto commit can improve performance + this.conn.setAutoCommit(false); + // explicitly set isolation level to RC + this.conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + + LOG.info( + "JDBC connection: autoCommit = {}, trxn = {}", + conn.getAutoCommit(), + conn.getTransactionIsolation()); } catch (SQLException e) { throw Status.INTERNAL .withDescription( @@ -106,6 +127,28 @@ public JDBCSink(JDBCSinkConfig config, TableSchema tableSchema) { } } + private static Map getColumnTypeMapping( + Connection conn, String tableName, String schemaName) { + Map columnTypeMap = new HashMap<>(); + try { + ResultSet columnResultSet = + conn.getMetaData().getColumns(null, schemaName, tableName, null); + + while (columnResultSet.next()) { + columnTypeMap.put( + columnResultSet.getString(JDBC_COLUMN_NAME_KEY), + columnResultSet.getInt(JDBC_DATA_TYPE_KEY)); + } + } catch (SQLException e) { + throw Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); + } + LOG.info("detected column type mapping {}", columnTypeMap); + return columnTypeMap; + } + private static List getPkColumnNames( Connection conn, String tableName, String schemaName) { List pkColumnNames = new ArrayList<>(); @@ -133,6 +176,7 @@ private PreparedStatement prepareInsertStatement(SinkRow row) { try { var preparedStmt = insertPreparedStmt; jdbcDialect.bindInsertIntoStatement(preparedStmt, conn, getTableSchema(), row); + preparedStmt.addBatch(); return preparedStmt; } catch (SQLException e) { throw io.grpc.Status.INTERNAL @@ -149,7 +193,7 @@ private PreparedStatement prepareUpsertStatement(SinkRow row) { switch (row.getOp()) { case INSERT: jdbcDialect.bindUpsertStatement(preparedStmt, conn, getTableSchema(), row); - return preparedStmt; + break; 
case UPDATE_INSERT: if (!updateFlag) { throw Status.FAILED_PRECONDITION @@ -158,12 +202,14 @@ private PreparedStatement prepareUpsertStatement(SinkRow row) { } jdbcDialect.bindUpsertStatement(preparedStmt, conn, getTableSchema(), row); updateFlag = false; - return preparedStmt; + break; default: throw Status.FAILED_PRECONDITION .withDescription("unexpected op type: " + row.getOp()) .asRuntimeException(); } + preparedStmt.addBatch(); + return preparedStmt; } catch (SQLException e) { throw io.grpc.Status.INTERNAL .withDescription( @@ -192,6 +238,7 @@ private PreparedStatement prepareDeleteStatement(SinkRow row) { Object fromRow = getTableSchema().getFromRow(primaryKey, row); deletePreparedStmt.setObject(placeholderIdx++, fromRow); } + deletePreparedStmt.addBatch(); return deletePreparedStmt; } catch (SQLException e) { throw Status.INTERNAL @@ -203,48 +250,50 @@ private PreparedStatement prepareDeleteStatement(SinkRow row) { @Override public void write(Iterator rows) { - while (rows.hasNext()) { - try (SinkRow row = rows.next()) { - PreparedStatement stmt; - if (row.getOp() == Data.Op.UPDATE_DELETE) { - updateFlag = true; - continue; - } + PreparedStatement deleteStatement = null; + PreparedStatement upsertStatement = null; + PreparedStatement insertStatement = null; - if (config.isUpsertSink()) { - stmt = prepareForUpsert(row); + while (rows.hasNext()) { + SinkRow row = rows.next(); + if (row.getOp() == Data.Op.UPDATE_DELETE) { + updateFlag = true; + continue; + } + if (config.isUpsertSink()) { + if (row.getOp() == Data.Op.DELETE) { + deleteStatement = prepareDeleteStatement(row); } else { - stmt = prepareForAppendOnly(row); + upsertStatement = prepareUpsertStatement(row); } - - try { - LOG.debug("Executing statement: {}", stmt); - stmt.executeUpdate(); - stmt.clearParameters(); - } catch (SQLException e) { - throw Status.INTERNAL - .withDescription( - String.format(ERROR_REPORT_TEMPLATE, stmt, e.getMessage())) - .asRuntimeException(); - } - } catch (Exception e) { - 
throw new RuntimeException(e); + } else { + insertStatement = prepareInsertStatement(row); } } - } - private PreparedStatement prepareForUpsert(SinkRow row) { - PreparedStatement stmt; - if (row.getOp() == Data.Op.DELETE) { - stmt = prepareDeleteStatement(row); - } else { - stmt = prepareUpsertStatement(row); + try { + // Execute staging statements after all rows are prepared. + // We execute DELETE statement before to avoid accidentally deletion. + executeStatement(deleteStatement); + executeStatement(upsertStatement); + executeStatement(insertStatement); + + conn.commit(); + } catch (SQLException e) { + throw io.grpc.Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } - return stmt; } - private PreparedStatement prepareForAppendOnly(SinkRow row) { - return prepareInsertStatement(row); + private void executeStatement(PreparedStatement stmt) throws SQLException { + if (stmt == null) { + return; + } + LOG.debug("Executing statement: {}", stmt); + stmt.executeBatch(); + stmt.clearParameters(); } @Override @@ -255,14 +304,6 @@ public void sync() { "expected UPDATE_INSERT to complete an UPDATE operation, got `sync`") .asRuntimeException(); } - try { - conn.commit(); - } catch (SQLException e) { - throw io.grpc.Status.INTERNAL - .withDescription( - String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) - .asRuntimeException(); - } } @Override diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/JdbcDialectFactory.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/JdbcDialectFactory.java index 22cfb6616be9a..49ac6d3d309ba 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/JdbcDialectFactory.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/JdbcDialectFactory.java @@ -16,5 +16,5 @@ public interface 
JdbcDialectFactory { - JdbcDialect create(); + JdbcDialect create(int[] columnSqlTypes); } diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/MySqlDialectFactory.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/MySqlDialectFactory.java index c007ee3f2fccd..0bfbfa857c3e4 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/MySqlDialectFactory.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/MySqlDialectFactory.java @@ -17,7 +17,7 @@ public class MySqlDialectFactory implements JdbcDialectFactory { @Override - public JdbcDialect create() { + public JdbcDialect create(int[] columnSqlTypes) { return new MySqlDialect(); } } diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialect.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialect.java index b93c3d2ab3824..570e2beaf5a67 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialect.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialect.java @@ -27,6 +27,12 @@ public class PostgresDialect implements JdbcDialect { + private final int[] columnSqlTypes; + + public PostgresDialect(int[] columnSqlTypes) { + this.columnSqlTypes = columnSqlTypes; + } + @Override public SchemaTableName createSchemaTableName(String schemaName, String tableName) { if (schemaName == null || schemaName.isBlank()) { @@ -84,36 +90,42 @@ public void bindInsertIntoStatement( throws SQLException { var columnDescs = tableSchema.getColumnDescs(); int placeholderIdx = 1; - for (int i = 0; i < row.size(); i++) { - var column = columnDescs.get(i); + for (int columnIdx = 0; columnIdx < row.size(); columnIdx++) { + var column = columnDescs.get(columnIdx); switch 
(column.getDataType().getTypeName()) { case DECIMAL: - stmt.setBigDecimal(placeholderIdx++, (java.math.BigDecimal) row.get(i)); + stmt.setBigDecimal(placeholderIdx++, (java.math.BigDecimal) row.get(columnIdx)); break; case INTERVAL: - stmt.setObject(placeholderIdx++, new PGInterval((String) row.get(i))); + stmt.setObject(placeholderIdx++, new PGInterval((String) row.get(columnIdx))); break; case JSONB: // reference: https://github.com/pgjdbc/pgjdbc/issues/265 var pgObj = new PGobject(); pgObj.setType("jsonb"); - pgObj.setValue((String) row.get(i)); + pgObj.setValue((String) row.get(columnIdx)); stmt.setObject(placeholderIdx++, pgObj); break; case BYTEA: - stmt.setBytes(placeholderIdx++, (byte[]) row.get(i)); + stmt.setBytes(placeholderIdx++, (byte[]) row.get(columnIdx)); break; case LIST: - var val = row.get(i); + var val = row.get(columnIdx); assert (val instanceof Object[]); Object[] objArray = (Object[]) val; assert (column.getDataType().getFieldTypeCount() == 1); var fieldType = column.getDataType().getFieldType(0); stmt.setArray( - i + 1, conn.createArrayOf(fieldType.getTypeName().name(), objArray)); + placeholderIdx++, + conn.createArrayOf(fieldType.getTypeName().name(), objArray)); + break; + case VARCHAR: + // since VARCHAR column may sink to a UUID column, we get the target type + // from the mapping which should be Types.OTHER. 
+ stmt.setObject(placeholderIdx++, row.get(columnIdx), columnSqlTypes[columnIdx]); break; default: - stmt.setObject(placeholderIdx++, row.get(i)); + stmt.setObject(placeholderIdx++, row.get(columnIdx)); break; } } diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialectFactory.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialectFactory.java index 5e55ebef858d1..7e1908e7e8c2a 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialectFactory.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/jdbc/PostgresDialectFactory.java @@ -17,7 +17,7 @@ public class PostgresDialectFactory implements JdbcDialectFactory { @Override - public JdbcDialect create() { - return new PostgresDialect(); + public JdbcDialect create(int[] columnSqlTypes) { + return new PostgresDialect(columnSqlTypes); } } diff --git a/java/connector-node/risingwave-source-cdc/pom.xml b/java/connector-node/risingwave-source-cdc/pom.xml index 3dfccc269e209..683f472a65b14 100644 --- a/java/connector-node/risingwave-source-cdc/pom.xml +++ b/java/connector-node/risingwave-source-cdc/pom.xml @@ -1,16 +1,17 @@ - 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../../pom.xml risingwave-source-cdc - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT risingwave-source-cdc diff --git a/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/DebeziumOffset.java b/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/DebeziumOffset.java index 6555df7d8649d..670765105cf66 100644 --- a/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/DebeziumOffset.java +++ 
b/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/DebeziumOffset.java @@ -43,12 +43,15 @@ public class DebeziumOffset implements Serializable { public Map sourcePartition; public Map sourceOffset; + public boolean isHeartbeat; public DebeziumOffset() {} - public DebeziumOffset(Map sourcePartition, Map sourceOffset) { + public DebeziumOffset( + Map sourcePartition, Map sourceOffset, boolean isHeartbeat) { this.sourcePartition = sourcePartition; this.sourceOffset = sourceOffset; + this.isHeartbeat = isHeartbeat; } public void setSourcePartition(Map sourcePartition) { @@ -59,10 +62,16 @@ public void setSourceOffset(Map sourceOffset) { this.sourceOffset = sourceOffset; } + public void setHeartbeat(boolean heartbeat) { + isHeartbeat = heartbeat; + } + @Override public String toString() { return "DebeziumOffset{" - + "sourcePartition=" + + "isHeartbeat=" + + isHeartbeat + + ", sourcePartition=" + sourcePartition + ", sourceOffset=" + sourceOffset diff --git a/java/connector-node/s3-common/pom.xml b/java/connector-node/s3-common/pom.xml index 7e39d79e3c98a..b1c8033fc1f84 100644 --- a/java/connector-node/s3-common/pom.xml +++ b/java/connector-node/s3-common/pom.xml @@ -1,12 +1,12 @@ + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - com.risingwave.java - java-parent - 1.0-SNAPSHOT + com.risingwave + risingwave-java-root + 0.1.0-SNAPSHOT ../../pom.xml @@ -28,7 +28,7 @@ hadoop-aws - com.risingwave.java + com.risingwave connector-api diff --git a/java/dev.md b/java/dev.md index 148fde173baad..ac20c30fe69fa 100644 --- a/java/dev.md +++ b/java/dev.md @@ -56,3 +56,9 @@ Config with the following. It may work. 
"java.format.settings.profile": "Android" } ``` + +## Deploy UDF Library to Maven + +```sh +mvn clean deploy --pl udf --am +``` \ No newline at end of file diff --git a/java/java-binding-benchmark/pom.xml b/java/java-binding-benchmark/pom.xml index dadb6b8e85ef7..80303bc914e67 100644 --- a/java/java-binding-benchmark/pom.xml +++ b/java/java-binding-benchmark/pom.xml @@ -1,18 +1,23 @@ - 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT + + true + + java-binding-benchmark jar - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT java-binding-benchmark http://maven.apache.org @@ -33,7 +38,7 @@ test - com.risingwave.java + com.risingwave java-binding diff --git a/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/ArrayListBenchmark.java b/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/ArrayListBenchmark.java index 6540033371d34..c05cf23d2c582 100644 --- a/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/ArrayListBenchmark.java +++ b/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/ArrayListBenchmark.java @@ -20,8 +20,8 @@ import java.util.concurrent.TimeUnit; import org.openjdk.jmh.annotations.*; -@Warmup(iterations = 10, time = 1, timeUnit = TimeUnit.MILLISECONDS) -@Measurement(iterations = 20, time = 1, timeUnit = TimeUnit.MILLISECONDS) +@Warmup(iterations = 2, time = 1, timeUnit = TimeUnit.MILLISECONDS, batchSize = 10) +@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.MILLISECONDS, batchSize = 10) @Fork(value = 1) @BenchmarkMode(org.openjdk.jmh.annotations.Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) @@ -30,8 +30,6 @@ public class ArrayListBenchmark { @Param({"100", "1000", "10000"}) static int loopTime; - ArrayList> data = new ArrayList<>(); - public ArrayList getRow(int index) { short v1 = (short) index; int v2 = (int) index; @@ -61,17 +59,10 @@ public void getValue(ArrayList rowData) { Integer mayNull = (Integer) 
rowData.get(6); } - @Setup - public void setup() { - for (int i = 0; i < loopTime; i++) { - data.add(getRow(i)); - } - } - @Benchmark public void arrayListTest() { for (int i = 0; i < loopTime; i++) { - getValue(data.get(i)); + getValue(getRow(i)); } } } diff --git a/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/StreamchunkBenchmark.java b/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/StreamchunkBenchmark.java index 8741044f7b34e..9ca6c4781983f 100644 --- a/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/StreamchunkBenchmark.java +++ b/java/java-binding-benchmark/src/main/java/com/risingwave/java/binding/StreamchunkBenchmark.java @@ -16,31 +16,37 @@ package com.risingwave.java.binding; +import java.util.ArrayList; +import java.util.Iterator; import java.util.concurrent.TimeUnit; import org.openjdk.jmh.annotations.*; -@Warmup(iterations = 10, time = 1, timeUnit = TimeUnit.MILLISECONDS) -@Measurement(iterations = 20, time = 1, timeUnit = TimeUnit.MILLISECONDS) +@Warmup(iterations = 2, time = 1, timeUnit = TimeUnit.MILLISECONDS, batchSize = 10) +@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.MILLISECONDS, batchSize = 10) @Fork(value = 1) @BenchmarkMode(org.openjdk.jmh.annotations.Mode.AverageTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) +@OutputTimeUnit(TimeUnit.MILLISECONDS) @State(org.openjdk.jmh.annotations.Scope.Benchmark) public class StreamchunkBenchmark { @Param({"100", "1000", "10000"}) - static int loopTime; + int loopTime; - String str; - StreamChunkIterator iter; + Iterator iterOfIter; - @Setup(Level.Invocation) + @Setup(Level.Iteration) public void setup() { - str = "i i I f F B i"; - for (int i = 0; i < loopTime; i++) { - String b = i % 2 == 0 ? "f" : "t"; - String n = i % 2 == 0 ? "." 
: "1"; - str += String.format("\n + %d %d %d %d.0 %d.0 %s %s", i, i, i, i, i, b, n); + var iterList = new ArrayList(); + for (int iterI = 0; iterI < 10; iterI++) { + String str = "i i I f F B i"; + for (int i = 0; i < loopTime; i++) { + String b = i % 2 == 0 ? "f" : "t"; + String n = i % 2 == 0 ? "." : "1"; + str += String.format("\n + %d %d %d %d.0 %d.0 %s %s", i, i, i, i, i, b, n); + } + var iter = new StreamChunkIterator(str); + iterList.add(iter); } - iter = new StreamChunkIterator(str); + iterOfIter = iterList.iterator(); } public void getValue(StreamChunkRow row) { @@ -55,15 +61,18 @@ public void getValue(StreamChunkRow row) { @Benchmark public void streamchunkTest() { + if (!iterOfIter.hasNext()) { + throw new RuntimeException("too few prepared iter"); + } + var iter = iterOfIter.next(); int count = 0; while (true) { - try (StreamChunkRow row = iter.next()) { - if (row == null) { - break; - } - count += 1; - getValue(row); + StreamChunkRow row = iter.next(); + if (row == null) { + break; } + count += 1; + getValue(row); } if (count != loopTime) { throw new RuntimeException( diff --git a/java/java-binding-integration-test/pom.xml b/java/java-binding-integration-test/pom.xml index b5882db33bfb5..08938feb351f0 100644 --- a/java/java-binding-integration-test/pom.xml +++ b/java/java-binding-integration-test/pom.xml @@ -3,9 +3,9 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT 4.0.0 @@ -14,21 +14,22 @@ 11 11 + true - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave java-binding - com.risingwave.java + com.risingwave common-utils - \ No newline at end of file + diff --git a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java 
b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java index 9f4038cf3f9a3..f1996bb96f43d 100644 --- a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java +++ b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java @@ -72,13 +72,12 @@ public static void main(String[] args) { try (HummockIterator iter = new HummockIterator(readPlan)) { int count = 0; while (true) { - try (KeyedRow row = iter.next()) { - if (row == null) { - break; - } - count += 1; - validateRow(row); + KeyedRow row = iter.next(); + if (row == null) { + break; } + count += 1; + validateRow(row); } int expectedCount = 30000; if (count != expectedCount) { diff --git a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java index 0cc6977de2f0c..ad59a74e4c20c 100644 --- a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java +++ b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java @@ -25,13 +25,12 @@ public static void main(String[] args) throws IOException { try (StreamChunkIterator iter = new StreamChunkIterator(payload)) { int count = 0; while (true) { - try (StreamChunkRow row = iter.next()) { - if (row == null) { - break; - } - count += 1; - validateRow(row); + StreamChunkRow row = iter.next(); + if (row == null) { + break; } + count += 1; + validateRow(row); } int expectedCount = 30000; if (count != expectedCount) { diff --git a/java/java-binding/pom.xml b/java/java-binding/pom.xml index a39288ca9e6c8..d53eaa26b2d57 100644 --- a/java/java-binding/pom.xml +++ b/java/java-binding/pom.xml @@ -1,13 +1,14 @@ - 4.0.0 - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT java-binding @@ 
-16,11 +17,13 @@ UTF-8 11 11 - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT 1.0.0 false + true + org.questdb @@ -28,11 +31,11 @@ ${jni.loader.version} - com.risingwave.java + com.risingwave proto - com.risingwave.java + com.risingwave common-utils @@ -60,7 +63,8 @@ build-jni-rust - + none diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java index a12978d92e995..d9fb28115b68c 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java @@ -14,75 +14,73 @@ package com.risingwave.java.binding; -public class BaseRow implements AutoCloseable { +public class BaseRow { protected final long pointer; - private boolean isClosed; protected BaseRow(long pointer) { this.pointer = pointer; - this.isClosed = false; } public boolean isNull(int index) { - return Binding.rowIsNull(pointer, index); + return Binding.iteratorIsNull(pointer, index); } public short getShort(int index) { - return Binding.rowGetInt16Value(pointer, index); + return Binding.iteratorGetInt16Value(pointer, index); } public int getInt(int index) { - return Binding.rowGetInt32Value(pointer, index); + return Binding.iteratorGetInt32Value(pointer, index); } public long getLong(int index) { - return Binding.rowGetInt64Value(pointer, index); + return Binding.iteratorGetInt64Value(pointer, index); } public float getFloat(int index) { - return Binding.rowGetFloatValue(pointer, index); + return Binding.iteratorGetFloatValue(pointer, index); } public double getDouble(int index) { - return Binding.rowGetDoubleValue(pointer, index); + return Binding.iteratorGetDoubleValue(pointer, index); } public boolean getBoolean(int index) { - return Binding.rowGetBooleanValue(pointer, index); + return Binding.iteratorGetBooleanValue(pointer, index); } public String getString(int index) { - return Binding.rowGetStringValue(pointer, index); + 
return Binding.iteratorGetStringValue(pointer, index); } public java.sql.Timestamp getTimestamp(int index) { - return Binding.rowGetTimestampValue(pointer, index); + return Binding.iteratorGetTimestampValue(pointer, index); } public java.sql.Time getTime(int index) { - return Binding.rowGetTimeValue(pointer, index); + return Binding.iteratorGetTimeValue(pointer, index); } public java.math.BigDecimal getDecimal(int index) { - return Binding.rowGetDecimalValue(pointer, index); + return Binding.iteratorGetDecimalValue(pointer, index); } public java.sql.Date getDate(int index) { - return Binding.rowGetDateValue(pointer, index); + return Binding.iteratorGetDateValue(pointer, index); } // string representation of interval: "2 mons 3 days 00:00:00.000004" or "P1Y2M3DT4H5M6.789123S" public String getInterval(int index) { - return Binding.rowGetIntervalValue(pointer, index); + return Binding.iteratorGetIntervalValue(pointer, index); } // string representation of jsonb: '{"key": "value"}' public String getJsonb(int index) { - return Binding.rowGetJsonbValue(pointer, index); + return Binding.iteratorGetJsonbValue(pointer, index); } public byte[] getBytea(int index) { - return Binding.rowGetByteaValue(pointer, index); + return Binding.iteratorGetByteaValue(pointer, index); } /** @@ -92,16 +90,8 @@ public byte[] getBytea(int index) { * Object[] elements) */ public Object[] getArray(int index, Class clazz) { - var val = Binding.rowGetArrayValue(pointer, index, clazz); + var val = Binding.iteratorGetArrayValue(pointer, index, clazz); assert (val instanceof Object[]); return (Object[]) val; } - - @Override - public void close() { - if (!isClosed) { - isClosed = true; - Binding.rowClose(pointer); - } - } } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java b/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java index 3f05768ec74b8..683437c9c2924 100644 --- 
a/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java @@ -17,71 +17,73 @@ import io.questdb.jar.jni.JarJniLoader; public class Binding { + private static final boolean IS_EMBEDDED_CONNECTOR = + Boolean.parseBoolean(System.getProperty("is_embedded_connector")); + static { - JarJniLoader.loadLib(Binding.class, "/risingwave/jni", "risingwave_java_binding"); + if (!IS_EMBEDDED_CONNECTOR) { + JarJniLoader.loadLib(Binding.class, "/risingwave/jni", "risingwave_java_binding"); + } } public static native int vnodeCount(); // hummock iterator method // Return a pointer to the iterator - static native long hummockIteratorNew(byte[] readPlan); + static native long iteratorNewHummock(byte[] readPlan); + + static native boolean iteratorNext(long pointer); + + static native void iteratorClose(long pointer); - // return a pointer to the next row - static native long hummockIteratorNext(long pointer); + static native long iteratorNewFromStreamChunkPayload(byte[] streamChunkPayload); - // Since the underlying rust does not have garbage collection, we will have to manually call - // close on the iterator to release the iterator instance pointed by the pointer. 
- static native void hummockIteratorClose(long pointer); + static native long iteratorNewFromStreamChunkPretty(String str); - // row method - static native byte[] rowGetKey(long pointer); + static native byte[] iteratorGetKey(long pointer); - static native int rowGetOp(long pointer); + static native int iteratorGetOp(long pointer); - static native boolean rowIsNull(long pointer, int index); + static native boolean iteratorIsNull(long pointer, int index); - static native short rowGetInt16Value(long pointer, int index); + static native short iteratorGetInt16Value(long pointer, int index); - static native int rowGetInt32Value(long pointer, int index); + static native int iteratorGetInt32Value(long pointer, int index); - static native long rowGetInt64Value(long pointer, int index); + static native long iteratorGetInt64Value(long pointer, int index); - static native float rowGetFloatValue(long pointer, int index); + static native float iteratorGetFloatValue(long pointer, int index); - static native double rowGetDoubleValue(long pointer, int index); + static native double iteratorGetDoubleValue(long pointer, int index); - static native boolean rowGetBooleanValue(long pointer, int index); + static native boolean iteratorGetBooleanValue(long pointer, int index); - static native String rowGetStringValue(long pointer, int index); + static native String iteratorGetStringValue(long pointer, int index); - static native java.sql.Timestamp rowGetTimestampValue(long pointer, int index); + static native java.sql.Timestamp iteratorGetTimestampValue(long pointer, int index); - static native java.math.BigDecimal rowGetDecimalValue(long pointer, int index); + static native java.math.BigDecimal iteratorGetDecimalValue(long pointer, int index); - static native java.sql.Time rowGetTimeValue(long pointer, int index); + static native java.sql.Time iteratorGetTimeValue(long pointer, int index); - static native java.sql.Date rowGetDateValue(long pointer, int index); + static native 
java.sql.Date iteratorGetDateValue(long pointer, int index); - static native String rowGetIntervalValue(long pointer, int index); + static native String iteratorGetIntervalValue(long pointer, int index); - static native String rowGetJsonbValue(long pointer, int index); + static native String iteratorGetJsonbValue(long pointer, int index); - static native byte[] rowGetByteaValue(long pointer, int index); + static native byte[] iteratorGetByteaValue(long pointer, int index); // TODO: object or object array? - static native Object rowGetArrayValue(long pointer, int index, Class clazz); + static native Object iteratorGetArrayValue(long pointer, int index, Class clazz); - // Since the underlying rust does not have garbage collection, we will have to manually call - // close on the row to release the row instance pointed by the pointer. - static native void rowClose(long pointer); + public static native boolean sendCdcSourceMsgToChannel(long channelPtr, byte[] msg); - // stream chunk iterator method - static native long streamChunkIteratorNew(byte[] streamChunkPayload); + public static native byte[] recvSinkWriterRequestFromChannel(long channelPtr); - static native long streamChunkIteratorNext(long pointer); + public static native boolean sendSinkWriterResponseToChannel(long channelPtr, byte[] msg); - static native void streamChunkIteratorClose(long pointer); + public static native byte[] recvSinkCoordinatorRequestFromChannel(long channelPtr); - static native long streamChunkIteratorFromPretty(String str); + public static native boolean sendSinkCoordinatorResponseToChannel(long channelPtr, byte[] msg); } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java b/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java index ced034fd649d9..cf88068ddf615 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java +++ 
b/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java @@ -21,13 +21,13 @@ public class HummockIterator implements AutoCloseable { private boolean isClosed; public HummockIterator(ReadPlan readPlan) { - this.pointer = Binding.hummockIteratorNew(readPlan.toByteArray()); + this.pointer = Binding.iteratorNewHummock(readPlan.toByteArray()); this.isClosed = false; } public KeyedRow next() { - long pointer = Binding.hummockIteratorNext(this.pointer); - if (pointer == 0) { + boolean hasNext = Binding.iteratorNext(this.pointer); + if (!hasNext) { return null; } return new KeyedRow(pointer); @@ -37,7 +37,7 @@ public KeyedRow next() { public void close() { if (!isClosed) { isClosed = true; - Binding.hummockIteratorClose(pointer); + Binding.iteratorClose(pointer); } } } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java index 6bbfdaafebabc..8f1e0b0117ac4 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java @@ -20,6 +20,6 @@ public KeyedRow(long pointer) { } public byte[] getKey() { - return Binding.rowGetKey(pointer); + return Binding.iteratorGetKey(pointer); } } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java index 89693befff700..5b300872bed51 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java @@ -19,7 +19,7 @@ public class StreamChunkIterator implements AutoCloseable { private boolean isClosed; public StreamChunkIterator(byte[] streamChunkPayload) { - this.pointer = Binding.streamChunkIteratorNew(streamChunkPayload); + this.pointer = 
Binding.iteratorNewFromStreamChunkPayload(streamChunkPayload); this.isClosed = false; } @@ -30,13 +30,13 @@ public StreamChunkIterator(byte[] streamChunkPayload) { * 40" */ public StreamChunkIterator(String str) { - this.pointer = Binding.streamChunkIteratorFromPretty(str); + this.pointer = Binding.iteratorNewFromStreamChunkPretty(str); this.isClosed = false; } public StreamChunkRow next() { - long pointer = Binding.streamChunkIteratorNext(this.pointer); - if (pointer == 0) { + boolean hasNext = Binding.iteratorNext(this.pointer); + if (!hasNext) { return null; } return new StreamChunkRow(pointer); @@ -46,7 +46,7 @@ public StreamChunkRow next() { public void close() { if (!isClosed) { isClosed = true; - Binding.streamChunkIteratorClose(pointer); + Binding.iteratorClose(pointer); } } } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java index 401d3d98f766d..2825d62a0b0ca 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java @@ -22,6 +22,6 @@ public StreamChunkRow(long pointer) { } public Data.Op getOp() { - return Data.Op.forNumber(Binding.rowGetOp(pointer)); + return Data.Op.forNumber(Binding.iteratorGetOp(pointer)); } } diff --git a/java/pom.xml b/java/pom.xml index e7c790e37a79f..4b815f9512eb5 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -1,11 +1,40 @@ - + 4.0.0 - com.risingwave.java - java-parent - 1.0-SNAPSHOT + com.risingwave + risingwave-java-root + 0.1.0-SNAPSHOT + pom + + RisingWave Java Root POM + https://www.risingwave.com + RisingWave is a distributed SQL streaming database. 
+ + + + Apache License, Version 2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + + Runji Wang + wangrunji0408@163.com + RisingWave Labs + https://www.risingwave.com + + + + + scm:git:https://github.com/risingwavelabs/risingwave.git + scm:git:https://github.com/risingwavelabs/risingwave.git + https://github.com/risingwavelabs/risingwave + + proto udf @@ -26,16 +55,17 @@ connector-node/assembly connector-node/s3-common - pom 11 11 1.0.0 + UTF-8 + UTF-8 3.21.1 1.53.0 2.10 - 1.0-SNAPSHOT + 0.1.0-SNAPSHOT 2.27.1 2.20.0 1.5.0 @@ -178,62 +208,62 @@ test - com.risingwave.java + com.risingwave proto ${module.version} - com.risingwave.java + com.risingwave java-binding ${module.version} - com.risingwave.java + com.risingwave common-utils ${module.version} - com.risingwave.java + com.risingwave connector-api ${module.version} - com.risingwave.java + com.risingwave s3-common ${module.version} - com.risingwave.java + com.risingwave risingwave-source-cdc ${module.version} - com.risingwave.java + com.risingwave risingwave-sink-iceberg ${module.version} - com.risingwave.java + com.risingwave risingwave-connector-service ${module.version} - com.risingwave.java + com.risingwave risingwave-sink-deltalake ${module.version} - com.risingwave.java + com.risingwave risingwave-sink-es-7 ${module.version} - com.risingwave.java + com.risingwave risingwave-sink-cassandra ${module.version} - com.risingwave.java + com.risingwave risingwave-sink-jdbc ${module.version} @@ -300,7 +330,7 @@ - + @@ -340,6 +370,75 @@ + + org.apache.maven.plugins + maven-source-plugin + 3.3.0 + + + attach-sources + + jar-no-fork + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.5.0 + + + attach-javadocs + + jar + + + + + com.risingwave.connector.* + + + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + true + + ossrh + https://s01.oss.sonatype.org/ + true + + + + + + ossrh + https://s01.oss.sonatype.org/content/repositories/snapshots + + + ossrh + 
https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ + + diff --git a/java/proto/pom.xml b/java/proto/pom.xml index cd82f3b6e22d7..cb33ed9574e54 100644 --- a/java/proto/pom.xml +++ b/java/proto/pom.xml @@ -3,15 +3,19 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../pom.xml 4.0.0 proto + + true + + io.grpc @@ -47,9 +51,13 @@ 0.6.1 ${basedir}/../../proto/ - com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java - io.grpc:protoc-gen-grpc-java:1.49.0:exe:${os.detected.classifier} + + io.grpc:protoc-gen-grpc-java:1.49.0:exe:${os.detected.classifier} + @@ -64,4 +72,4 @@ - \ No newline at end of file + diff --git a/java/udf-example/pom.xml b/java/udf-example/pom.xml index 781d89db7bbe5..dd3e54aca1fa2 100644 --- a/java/udf-example/pom.xml +++ b/java/udf-example/pom.xml @@ -1,22 +1,24 @@ - 4.0.0 + - java-parent - com.risingwave.java - 1.0-SNAPSHOT + com.risingwave + risingwave-java-root + 0.1.0-SNAPSHOT ../pom.xml - com.example - udf-example - 1.0-SNAPSHOT + com.risingwave + risingwave-udf-example + 0.1.0-SNAPSHOT udf-example - http://maven.apache.org + https://docs.risingwave.com/docs/current/udf-java UTF-8 @@ -26,9 +28,14 @@ - com.risingwave.java + com.risingwave risingwave-udf - 0.0.1 + 0.1.0-SNAPSHOT + + + com.google.code.gson + gson + 2.10.1 @@ -37,7 +44,7 @@ org.apache.maven.plugins maven-surefire-plugin - 3.0.0-M6 + 3.0.0 --add-opens=java.base/java.nio=ALL-UNNAMED @@ -71,4 +78,4 @@ - + \ No newline at end of file diff --git a/java/udf/README.md b/java/udf/README.md index f963fa6b368e0..200b897b8b890 100644 --- a/java/udf/README.md +++ b/java/udf/README.md @@ -18,6 +18,19 @@ cd risingwave/java/udf mvn install ``` +Or you can 
add the following dependency to your `pom.xml` file: + +```xml + + + com.risingwave + risingwave-udf + 0.1.0 + + +``` + + ## Creating a New Project > NOTE: You can also start from the [udf-example](../udf-example) project without creating the project from scratch. @@ -41,9 +54,9 @@ Configure your `pom.xml` file as follows: - com.risingwave.java + com.risingwave risingwave-udf - 0.0.1 + 0.1.0 @@ -57,7 +70,7 @@ The `--add-opens` flag must be added when running unit tests through Maven: org.apache.maven.plugins maven-surefire-plugin - 3.0.0-M7 + 3.0.0 --add-opens=java.base/java.nio=ALL-UNNAMED diff --git a/java/udf/pom.xml b/java/udf/pom.xml index c589136b8b302..e9ae758f5a880 100644 --- a/java/udf/pom.xml +++ b/java/udf/pom.xml @@ -1,23 +1,22 @@ - 4.0.0 - com.risingwave.java + + com.risingwave risingwave-udf jar - 0.0.1 + 0.1.0-SNAPSHOT + - java-parent - com.risingwave.java - 1.0-SNAPSHOT + risingwave-java-root + com.risingwave + 0.1.0-SNAPSHOT ../pom.xml - risingwave-udf - http://maven.apache.org - - 11 - 11 - + RisingWave Java UDF SDK + https://docs.risingwave.com/docs/current/udf-java @@ -29,17 +28,12 @@ org.apache.arrow arrow-vector - 12.0.0 + 13.0.0 org.apache.arrow flight-core - 12.0.0 - - - com.google.code.gson - gson - 2.10.1 + 13.0.0 org.slf4j diff --git a/java/udf/src/main/java/com/risingwave/functions/UdfServer.java b/java/udf/src/main/java/com/risingwave/functions/UdfServer.java index ad2b5e1ac6b68..7d063a8d80d37 100644 --- a/java/udf/src/main/java/com/risingwave/functions/UdfServer.java +++ b/java/udf/src/main/java/com/risingwave/functions/UdfServer.java @@ -55,18 +55,30 @@ public void addFunction(String name, UserDefinedFunction udf) throws IllegalArgu this.producer.addFunction(name, udf); } - /** Start the server. */ + /** + * Start the server. 
+ * + * @throws IOException if the server fails to start + */ public void start() throws IOException { this.server.start(); logger.info("listening on " + this.server.getLocation().toSocketAddress()); } - /** Get the port the server is listening on. */ + /** + * Get the port the server is listening on. + * + * @return the port number + */ public int getPort() { return this.server.getPort(); } - /** Wait for the server to terminate. */ + /** + * Wait for the server to terminate. + * + * @throws InterruptedException if the thread is interrupted while waiting + */ public void awaitTermination() throws InterruptedException { this.server.awaitTermination(); } diff --git a/proto/backup_service.proto b/proto/backup_service.proto index 425d3abb24e2f..feca5f17b7dc3 100644 --- a/proto/backup_service.proto +++ b/proto/backup_service.proto @@ -13,11 +13,8 @@ enum BackupJobStatus { UNSPECIFIED = 0; RUNNING = 1; SUCCEEDED = 2; - // NOT_FOUND indicates one of these cases: - // - Invalid job id. - // - Job has failed. - // - Job has succeeded, but its resulted backup has been deleted later. NOT_FOUND = 3; + FAILED = 4; } message BackupMetaRequest {} message BackupMetaResponse { @@ -29,6 +26,7 @@ message GetBackupJobStatusRequest { message GetBackupJobStatusResponse { uint64 job_id = 1; BackupJobStatus job_status = 2; + string message = 3; } message DeleteMetaSnapshotRequest { repeated uint64 snapshot_ids = 1; diff --git a/proto/buf.yaml b/proto/buf.yaml index a2870930bb4ac..1aa31816ce0af 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -9,6 +9,7 @@ lint: # This proto is copied from https://github.com/grpc/grpc/blob/v1.15.0/doc/health-checking.md # It violates some lint rules, so we ignore it. - health.proto + allow_comment_ignores: true enum_zero_value_suffix: UNSPECIFIED except: - ENUM_VALUE_PREFIX # Enum variant doesn't have to prefix with enum name. 
diff --git a/proto/catalog.proto b/proto/catalog.proto index 9af808e141bae..2dad1ecf3394e 100644 --- a/proto/catalog.proto +++ b/proto/catalog.proto @@ -32,6 +32,21 @@ enum SchemaRegistryNameStrategy { TOPIC_RECORD_NAME_STRATEGY = 2; } +enum StreamJobStatus { + // Prefixed by `STREAM_JOB_STATUS` due to protobuf namespacing rules. + STREAM_JOB_STATUS_UNSPECIFIED = 0; + CREATING = 1; + CREATED = 2; +} + +// How the stream job was created will determine +// whether they are persisted. +enum CreateType { + CREATE_TYPE_UNSPECIFIED = 0; + BACKGROUND = 1; + FOREGROUND = 2; +} + message StreamSourceInfo { // deprecated plan_common.RowFormatType row_format = 1; @@ -91,6 +106,13 @@ enum SinkType { UPSERT = 3; } +// Similar to `StreamSourceInfo`, and may replace `SinkType` later. +message SinkFormatDesc { + plan_common.FormatType format = 1; + plan_common.EncodeType encode = 2; + map options = 3; +} + // the catalog of the sink. There are two kind of schema here. The full schema is all columns // stored in the `column` which is the sink executor/fragment's output schema. The visible // schema contains the columns whose `is_hidden` is false, which is the columns sink out to the @@ -107,7 +129,7 @@ message Sink { repeated int32 distribution_key = 8; // User-defined primary key indices for the upsert sink. repeated int32 downstream_pk = 9; - SinkType sink_type = 10; + SinkType sink_type = 10; // to be deprecated uint32 owner = 11; map properties = 12; string definition = 13; @@ -116,6 +138,8 @@ message Sink { optional uint64 created_at_epoch = 16; string db_name = 17; string sink_from_name = 18; + StreamJobStatus stream_job_status = 19; + SinkFormatDesc format_desc = 20; } message Connection { @@ -157,6 +181,7 @@ message Index { optional uint64 initialized_at_epoch = 10; optional uint64 created_at_epoch = 11; + StreamJobStatus stream_job_status = 12; } message Function { @@ -256,6 +281,11 @@ message Table { // In older versions we can just initialize without it. 
bool cleaned_by_watermark = 30; + // Used to filter created / creating tables in meta. + StreamJobStatus stream_job_status = 31; + + CreateType create_type = 32; + // Per-table catalog version, used by schema change. `None` for internal tables and tests. // Not to be confused with the global catalog version for notification service. TableVersion version = 100; diff --git a/proto/compactor.proto b/proto/compactor.proto index 29dee929d981b..06f071d75040a 100644 --- a/proto/compactor.proto +++ b/proto/compactor.proto @@ -2,6 +2,10 @@ syntax = "proto3"; package compactor; +import "catalog.proto"; +import "common.proto"; +import "hummock.proto"; + option java_package = "com.risingwave.proto"; option optimize_for = SPEED; @@ -9,6 +13,27 @@ message EchoRequest {} message EchoResponse {} +message DispatchCompactionTaskRequest { + // DispatchCompactionTaskRequest is used to pass compaction task level parameters. + oneof task { + hummock.CompactTask compact_task = 1; + hummock.VacuumTask vacuum_task = 2; + hummock.FullScanTask full_scan_task = 3; + hummock.ValidationTask validation_task = 4; + hummock.CancelCompactTask cancel_compact_task = 5; + } + // Used to build filter_key_extract. + repeated catalog.Table tables = 6; + // We can generate output object ids for each compactor, which equal to the number of input SSTs. + // If all the output object ids are used up, this compaction task fails, and the next allocation will be twice the previous amount of output object ids. 
+ repeated uint64 output_object_ids = 7; +} + +message DispatchCompactionTaskResponse { + common.Status status = 1; +} + service CompactorService { rpc Echo(EchoRequest) returns (EchoResponse); + rpc DispatchCompactionTask(DispatchCompactionTaskRequest) returns (DispatchCompactionTaskResponse); } diff --git a/proto/connector_service.proto b/proto/connector_service.proto index 54728bf4ecdec..e750c8ce96e9d 100644 --- a/proto/connector_service.proto +++ b/proto/connector_service.proto @@ -23,9 +23,10 @@ message SinkParam { uint32 sink_id = 1; map properties = 2; TableSchema table_schema = 3; - catalog.SinkType sink_type = 4; + catalog.SinkType sink_type = 4; // to be deprecated string db_name = 5; string sink_from_name = 6; + catalog.SinkFormatDesc format_desc = 7; } enum SinkPayloadFormat { diff --git a/proto/ddl_service.proto b/proto/ddl_service.proto index 35ae7e0b01bb4..1efc933a7d033 100644 --- a/proto/ddl_service.proto +++ b/proto/ddl_service.proto @@ -239,6 +239,8 @@ message ReplaceTablePlanRequest { stream_plan.StreamFragmentGraph fragment_graph = 2; // The mapping from the old columns to the new columns of the table. 
catalog.ColIndexMapping table_col_index_mapping = 3; + // Source catalog of table's associated source + catalog.Source source = 4; } message ReplaceTablePlanResponse { @@ -312,6 +314,10 @@ message GetTablesResponse { map tables = 1; } +message WaitRequest {} + +message WaitResponse {} + service DdlService { rpc CreateDatabase(CreateDatabaseRequest) returns (CreateDatabaseResponse); rpc DropDatabase(DropDatabaseRequest) returns (DropDatabaseResponse); @@ -341,4 +347,5 @@ service DdlService { rpc ListConnections(ListConnectionsRequest) returns (ListConnectionsResponse); rpc DropConnection(DropConnectionRequest) returns (DropConnectionResponse); rpc GetTables(GetTablesRequest) returns (GetTablesResponse); + rpc Wait(WaitRequest) returns (WaitResponse); } diff --git a/proto/expr.proto b/proto/expr.proto index fdeee76ce2425..745231a9cd2b5 100644 --- a/proto/expr.proto +++ b/proto/expr.proto @@ -33,6 +33,8 @@ message ExprNode { LESS_THAN_OR_EQUAL = 11; GREATER_THAN = 12; GREATER_THAN_OR_EQUAL = 13; + GREATEST = 14; + LEAST = 15; // logical operators AND = 21; OR = 22; @@ -104,6 +106,7 @@ message ExprNode { REGEXP_MATCH = 232; REGEXP_REPLACE = 280; REGEXP_COUNT = 281; + REGEXP_SPLIT_TO_ARRAY = 282; POW = 233; EXP = 234; CHR = 235; @@ -199,6 +202,7 @@ message ExprNode { ARRAY_TRANSFORM = 545; ARRAY_MIN = 546; ARRAY_MAX = 547; + ARRAY_SUM = 548; ARRAY_SORT = 549; // Int256 functions @@ -213,6 +217,7 @@ message ExprNode { JSONB_TYPEOF = 602; JSONB_ARRAY_LENGTH = 603; IS_JSON = 604; + JSONB_CAT = 605; // Non-pure functions below (> 1000) // ------------------------ @@ -220,6 +225,13 @@ message ExprNode { VNODE = 1101; // Non-deterministic functions PROCTIME = 2023; + PG_SLEEP = 2024; + PG_SLEEP_FOR = 2025; + PG_SLEEP_UNTIL = 2026; + + // Adminitration functions + COL_DESCRIPTION = 2100; + CAST_REGCLASS = 2101; } // Only use this field for function call. For other types of expression, it should be UNSPECIFIED. 
Type function_type = 1; @@ -241,6 +253,8 @@ message TableFunction { REGEXP_MATCHES = 3; RANGE = 4; GENERATE_SUBSCRIPTS = 5; + // buf:lint:ignore ENUM_VALUE_UPPER_SNAKE_CASE + _PG_EXPANDARRAY = 6; // Jsonb functions JSONB_ARRAY_ELEMENTS = 10; JSONB_ARRAY_ELEMENTS_TEXT = 11; @@ -335,6 +349,7 @@ message AggCall { MODE = 24; LAST_VALUE = 25; GROUPING = 26; + INTERNAL_LAST_SEEN_VALUE = 27; } Type type = 1; repeated InputRef args = 2; diff --git a/proto/hummock.proto b/proto/hummock.proto index b0a5152821009..ee7d532974ff3 100644 --- a/proto/hummock.proto +++ b/proto/hummock.proto @@ -8,6 +8,12 @@ import "common.proto"; option java_package = "com.risingwave.proto"; option optimize_for = SPEED; +enum BloomFilterType { + BLOOM_FILTER_UNSPECIFIED = 0; + SSTABLE = 1; + BLOCKED = 2; +} + message SstableInfo { uint64 object_id = 1; uint64 sst_id = 2; @@ -21,6 +27,7 @@ message SstableInfo { uint64 max_epoch = 10; uint64 uncompressed_file_size = 11; uint64 range_tombstone_count = 12; + BloomFilterType bloom_filter_kind = 13; } enum LevelType { @@ -295,6 +302,7 @@ message CompactTask { SHARED_BUFFER = 4; TTL = 5; TOMBSTONE = 6; + EMERGENCY = 7; } // Identifies whether the task is space_reclaim, if the compact_task_type increases, it will be refactored to enum @@ -387,8 +395,13 @@ message SubscribeCompactionEventRequest { // ReportTask provides the compact task to report to the meta. message ReportTask { - CompactTask compact_task = 2; + reserved 2; + reserved "compact_task"; map table_stats_change = 3; + + uint64 task_id = 4; + CompactTask.TaskStatus task_status = 5; + repeated SstableInfo sorted_output_ssts = 6; } // HeartBeat provides the progress status of all tasks on the Compactor. @@ -431,6 +444,26 @@ message SubscribeCompactionEventResponse { uint64 create_at = 7; } +message ReportCompactionTaskRequest { + // ReportTask provides the compact task to report to the meta. 
+ message ReportTask { + CompactTask compact_task = 2; + map table_stats_change = 3; + } + // HeartBeat provides the progress status of all tasks on the Compactor. + message HeartBeat { + repeated CompactTaskProgress progress = 2; + } + oneof event { + ReportTask report_task = 1; + HeartBeat heart_beat = 2; + } +} + +message ReportCompactionTaskResponse { + common.Status status = 1; +} + message ValidationTask { repeated SstableInfo sst_infos = 1; map sst_id_to_worker_id = 2; @@ -578,6 +611,8 @@ message RiseCtlUpdateCompactionConfigRequest { uint32 level0_overlapping_sub_level_compact_level_count = 12; uint64 max_space_reclaim_bytes = 13; uint64 level0_max_compact_file_number = 14; + bool enable_emergency_picker = 15; + uint32 tombstone_reclaim_ratio = 16; } } repeated uint64 compaction_group_ids = 1; @@ -627,6 +662,44 @@ message RiseCtlListCompactionStatusResponse { repeated CompactTaskProgress task_progress = 3; } +message ListBranchedObjectRequest {} + +message ListBranchedObjectResponse { + repeated BranchedObject branched_objects = 1; +} + +message ListActiveWriteLimitRequest {} + +message ListActiveWriteLimitResponse { + // < compaction group id, write limit info > + map write_limits = 1; +} + +message ListHummockMetaConfigRequest {} + +message ListHummockMetaConfigResponse { + map configs = 1; +} + +message RiseCtlRebuildTableStatsRequest {} + +message RiseCtlRebuildTableStatsResponse {} + +message GetCompactionScoreRequest { + uint64 compaction_group_id = 1; +} + +message GetCompactionScoreResponse { + message PickerInfo { + uint64 score = 1; + uint64 select_level = 2; + uint64 target_level = 3; + string picker_type = 4; + } + uint64 compaction_group_id = 1; + repeated PickerInfo scores = 2; +} + service HummockManagerService { rpc UnpinVersionBefore(UnpinVersionBeforeRequest) returns (UnpinVersionBeforeResponse); rpc GetCurrentVersion(GetCurrentVersionRequest) returns (GetCurrentVersionResponse); @@ -652,11 +725,17 @@ service HummockManagerService { rpc 
RiseCtlPauseVersionCheckpoint(RiseCtlPauseVersionCheckpointRequest) returns (RiseCtlPauseVersionCheckpointResponse); rpc RiseCtlResumeVersionCheckpoint(RiseCtlResumeVersionCheckpointRequest) returns (RiseCtlResumeVersionCheckpointResponse); rpc RiseCtlGetCheckpointVersion(RiseCtlGetCheckpointVersionRequest) returns (RiseCtlGetCheckpointVersionResponse); + rpc RiseCtlRebuildTableStats(RiseCtlRebuildTableStatsRequest) returns (RiseCtlRebuildTableStatsResponse); rpc InitMetadataForReplay(InitMetadataForReplayRequest) returns (InitMetadataForReplayResponse); rpc PinVersion(PinVersionRequest) returns (PinVersionResponse); rpc SplitCompactionGroup(SplitCompactionGroupRequest) returns (SplitCompactionGroupResponse); rpc RiseCtlListCompactionStatus(RiseCtlListCompactionStatusRequest) returns (RiseCtlListCompactionStatusResponse); rpc SubscribeCompactionEvent(stream SubscribeCompactionEventRequest) returns (stream SubscribeCompactionEventResponse); + rpc ReportCompactionTask(ReportCompactionTaskRequest) returns (ReportCompactionTaskResponse); + rpc ListBranchedObject(ListBranchedObjectRequest) returns (ListBranchedObjectResponse); + rpc ListActiveWriteLimit(ListActiveWriteLimitRequest) returns (ListActiveWriteLimitResponse); + rpc ListHummockMetaConfig(ListHummockMetaConfigRequest) returns (ListHummockMetaConfigResponse); + rpc GetCompactionScore(GetCompactionScoreRequest) returns (GetCompactionScoreResponse); } message CompactionConfig { @@ -687,6 +766,7 @@ message CompactionConfig { // for tier compaction pick overlapping level uint32 level0_overlapping_sub_level_compact_level_count = 18; uint32 tombstone_reclaim_ratio = 19; + bool enable_emergency_picker = 20; } message TableStats { @@ -708,3 +788,10 @@ message WriteLimits { // < compaction group id, write limit info > map write_limits = 1; } + +message BranchedObject { + uint64 object_id = 1; + uint64 sst_id = 2; + // Compaction group id the SST belongs to. 
+ uint64 compaction_group_id = 3; +} diff --git a/proto/meta.proto b/proto/meta.proto index e81155350ba27..f9947d278b468 100644 --- a/proto/meta.proto +++ b/proto/meta.proto @@ -221,6 +221,7 @@ message ListFragmentDistributionResponse { repeated uint32 state_table_ids = 4; repeated uint32 upstream_fragment_ids = 5; uint32 fragment_type_mask = 6; + uint32 parallelism = 7; } repeated FragmentDistribution distributions = 1; } @@ -495,7 +496,7 @@ service MetaMemberService { // The schema for persisted system parameters. // Note on backward compatibility: -// - Do not remove deprecated fields. Mark them as deprecated both after the field definition and in `system_params/mod.rs` instead. +// - Do not remove deprecated fields. Mark them as deprecated instead. // - Do not rename existing fields, since each field is stored separately in the meta store with the field name as the key. // - To modify (rename, change the type or semantic of) a field, introduce a new field suffixed by the version. message SystemParams { diff --git a/proto/monitor_service.proto b/proto/monitor_service.proto index e364a2bff7704..7c7769da6b7ff 100644 --- a/proto/monitor_service.proto +++ b/proto/monitor_service.proto @@ -31,8 +31,27 @@ message HeapProfilingRequest { message HeapProfilingResponse {} +message ListHeapProfilingRequest {} +message ListHeapProfilingResponse { + string dir = 1; + repeated string name_manually = 2; + repeated string name_auto = 3; +} + +// Analyze dumped files +message AnalyzeHeapRequest { + // The file path + string path = 1; +} + +message AnalyzeHeapResponse { + bytes result = 1; +} + service MonitorService { rpc StackTrace(StackTraceRequest) returns (StackTraceResponse); rpc Profiling(ProfilingRequest) returns (ProfilingResponse); rpc HeapProfiling(HeapProfilingRequest) returns (HeapProfilingResponse); + rpc ListHeapProfiling(ListHeapProfilingRequest) returns (ListHeapProfilingResponse); + rpc AnalyzeHeap(AnalyzeHeapRequest) returns (AnalyzeHeapResponse); } diff --git 
a/proto/plan_common.proto b/proto/plan_common.proto index 24ed4c8372b86..d4c7a2e04f138 100644 --- a/proto/plan_common.proto +++ b/proto/plan_common.proto @@ -49,7 +49,12 @@ message GeneratedColumnDesc { } message DefaultColumnDesc { + // Expression of the `DEFAULT`. Used when inserting new records. expr.ExprNode expr = 1; + // Evaluated value of the expression at the time of the table creation or the + // column addition. Used when filling the default value for the records where + // the column is missing. + data.Datum snapshot_value = 2; } message StorageTableDesc { @@ -101,6 +106,7 @@ enum EncodeType { ENCODE_TYPE_PROTOBUF = 4; ENCODE_TYPE_JSON = 5; ENCODE_TYPE_BYTES = 6; + ENCODE_TYPE_TEMPLATE = 7; } enum RowFormatType { diff --git a/proto/stream_plan.proto b/proto/stream_plan.proto index 33fe96a71c803..683a43ef6e9be 100644 --- a/proto/stream_plan.proto +++ b/proto/stream_plan.proto @@ -150,6 +150,23 @@ message StreamSource { map properties = 6; catalog.StreamSourceInfo info = 7; string source_name = 8; + // Streaming rate limit + optional uint32 rate_limit = 9; +} + +// copy contents from StreamSource to prevent compatibility issues in the future +message StreamFsFetch { + uint32 source_id = 1; + catalog.Table state_table = 2; + optional uint32 row_id_index = 3; + repeated plan_common.ColumnCatalog columns = 4; + reserved "pk_column_ids"; + reserved 5; + map properties = 6; + catalog.StreamSourceInfo info = 7; + string source_name = 8; + // Streaming rate limit + optional uint32 rate_limit = 9; } // The executor only for receiving barrier from the meta service. 
It always resides in the leaves @@ -162,6 +179,10 @@ message SourceNode { StreamSource source_inner = 1; } +message StreamFsFetchNode { + StreamFsFetch node_inner = 1; +} + message SinkDesc { reserved 4; reserved "columns"; @@ -172,12 +193,13 @@ message SinkDesc { repeated uint32 downstream_pk = 6; repeated uint32 distribution_key = 7; map properties = 8; - catalog.SinkType sink_type = 9; + catalog.SinkType sink_type = 9; // to be deprecated repeated plan_common.ColumnCatalog column_catalogs = 10; string db_name = 11; // If the sink is from table or mv, this is name of the table/mv. Otherwise // it is the name of the sink itself. string sink_from_name = 12; + catalog.SinkFormatDesc format_desc = 13; } enum SinkLogStoreType { @@ -251,7 +273,7 @@ message SimpleAggNode { // Only used for stateless simple agg. repeated uint32 distribution_key = 2; repeated AggCallState agg_call_states = 3; - catalog.Table result_table = 4; + catalog.Table intermediate_state_table = 4; // Whether to optimize for append only stream. // It is true when the input is append-only bool is_append_only = 5; @@ -263,7 +285,7 @@ message HashAggNode { repeated uint32 group_key = 1; repeated expr.AggCall agg_calls = 2; repeated AggCallState agg_call_states = 3; - catalog.Table result_table = 4; + catalog.Table intermediate_state_table = 4; // Whether to optimize for append only stream. // It is true when the input is append-only bool is_append_only = 5; @@ -459,6 +481,9 @@ message ChainNode { // The rate limit for the chain node. optional uint32 rate_limit = 8; + + // Snapshot read every N barriers + uint32 snapshot_read_barrier_interval = 9 [deprecated = true]; } // BatchPlanNode is used for mv on mv snapshot read. @@ -653,6 +678,7 @@ message StreamNode { NoOpNode no_op = 135; EowcOverWindowNode eowc_over_window = 136; OverWindowNode over_window = 137; + StreamFsFetchNode stream_fs_fetch = 138; } // The id for the operator. This is local per mview. // TODO: should better be a uint32. 
diff --git a/risedev.yml b/risedev.yml index 0ad428794e37c..135a33f602a6a 100644 --- a/risedev.yml +++ b/risedev.yml @@ -26,9 +26,6 @@ profile: # - use: aws-s3 # bucket: test-bucket - # If you want to create CDC source table, uncomment the following line - # - use: connector-node - # if you want to enable etcd backend, uncomment the following lines. # - use: etcd # unsafe-no-fsync: true @@ -110,22 +107,6 @@ profile: - use: kafka persist-data: true - full-with-connector: - steps: - - use: minio - - use: etcd - - use: meta-node - - use: compute-node - - use: frontend - - use: compactor - - use: prometheus - - use: grafana - - use: zookeeper - persist-data: true - - use: kafka - persist-data: true - - use: connector-node - standalone-full-peripherals: steps: - use: minio @@ -137,15 +118,15 @@ profile: - use: frontend user-managed: true - use: compactor + user-managed: true - use: prometheus - use: grafana - use: zookeeper persist-data: true - use: kafka persist-data: true - - use: connector-node - standalone-minio-etcd-compactor: + standalone-minio-etcd: steps: - use: minio - use: etcd @@ -156,8 +137,9 @@ profile: - use: frontend user-managed: true - use: compactor + user-managed: true - standalone-full-peripherals-without-kafka: + standalone-minio-etcd-compactor: steps: - use: minio - use: etcd @@ -168,11 +150,6 @@ profile: - use: frontend user-managed: true - use: compactor - - use: prometheus - - use: grafana - - use: zookeeper - persist-data: true - - use: connector-node hdfs: steps: @@ -293,6 +270,21 @@ profile: exporter-port: 21250 - use: compactor + 3meta: + steps: + - use: meta-node + port: 5690 + dashboard-port: 5691 + exporter-port: 1250 + - use: meta-node + port: 15690 + dashboard-port: 15691 + exporter-port: 11250 + - use: meta-node + port: 25690 + dashboard-port: 25691 + exporter-port: 21250 + 3etcd-3meta-1cn-1fe: steps: - use: minio @@ -693,40 +685,6 @@ profile: - use: pubsub persist-data: true - ci-kafka: - config-path: src/config/ci.toml - steps: - - 
use: minio - - use: etcd - unsafe-no-fsync: true - - use: meta-node - - use: compute-node - enable-tiered-cache: true - - use: frontend - - use: compactor - - use: zookeeper - persist-data: true - - use: kafka - persist-data: true - - ci-kafka-plus-pubsub: - config-path: src/config/ci.toml - steps: - - use: minio - - use: etcd - unsafe-no-fsync: true - - use: meta-node - - use: compute-node - enable-tiered-cache: true - - use: frontend - - use: compactor - - use: zookeeper - persist-data: true - - use: kafka - persist-data: true - - use: pubsub - persist-data: true - ci-redis: config-path: src/config/ci.toml steps: @@ -805,6 +763,16 @@ profile: - use: frontend - use: compactor + ci-pulsar-test: + config-path: src/config/ci.toml + steps: + - use: minio + - use: meta-node + - use: compute-node + enable-tiered-cache: true + - use: frontend + - use: compactor + hummock-trace: config-path: src/config/hummock-trace.toml steps: @@ -927,9 +895,6 @@ template: # If `enable-tiered-cache` is true, hummock will use data directory as file cache. 
enable-tiered-cache: false - # RPC endpoint for connector node - connector-rpc-endpoint: "127.0.0.1:50051" - # Minio instances used by this compute node provide-minio: "minio*" @@ -975,9 +940,6 @@ template: # Id of this instance id: meta-node-${port} - # RPC endpoint for connector node colocated with Meta - connector-rpc-endpoint: "127.0.0.1:50051" - # If `user-managed` is true, this service will be started by user with the above config user-managed: false @@ -1051,9 +1013,6 @@ template: # Frontend used by this Prometheus instance provide-frontend: "frontend*" - # Connector-node used by this Prometheus instance - provide-connector-node: "connector*" - frontend: # Advertise address of frontend address: "127.0.0.1" @@ -1110,19 +1069,6 @@ template: # If `user-managed` is true, this service will be started by user with the above config user-managed: false - connector-node: - # Connector node advertise address - address: "127.0.0.1" - - # Connector node listen port - port: 50051 - - # Prometheus exporter listen port - exporter-port: 50052 - - # Id of this instance - id: connector-${port} - grafana: # Listen address of Grafana listen-address: ${address} diff --git a/scripts/coredump/coredump.entitlements b/scripts/coredump/coredump.entitlements new file mode 100644 index 0000000000000..3842541b7b0d7 --- /dev/null +++ b/scripts/coredump/coredump.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.security.get-task-allow + + + diff --git a/scripts/source/alter_data/kafka_alter.3 b/scripts/source/alter_data/kafka_alter.3 new file mode 100644 index 0000000000000..fb9015ae75caf --- /dev/null +++ b/scripts/source/alter_data/kafka_alter.3 @@ -0,0 +1 @@ +{"v1": 3, "v2": "33", "v3": 333} \ No newline at end of file diff --git a/src/batch/Cargo.toml b/src/batch/Cargo.toml index 5f90151a400ab..fef154450a563 100644 --- a/src/batch/Cargo.toml +++ b/src/batch/Cargo.toml @@ -17,7 +17,6 @@ normal = ["workspace-hack"] anyhow = "1" assert_matches = "1" async-recursion = "1" -async-stream = 
"0.3.5" async-trait = "0.1" either = "1" futures = { version = "0.3", default-features = false, features = ["alloc"] } @@ -53,7 +52,6 @@ tokio-metrics = "0.3.0" tokio-stream = "0.1" tonic = { workspace = true } tracing = "0.1" -uuid = "1" [target.'cfg(enable_task_local_alloc)'.dependencies] task_stats_alloc = { path = "../utils/task_stats_alloc" } @@ -64,10 +62,9 @@ workspace-hack = { path = "../workspace-hack" } [dev-dependencies] criterion = { workspace = true, features = ["async_tokio", "async"] } rand = "0.8" +risingwave_expr_impl = { workspace = true } tempfile = "3" - -[target.'cfg(unix)'.dev-dependencies] -tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git", rev = "b7f9f3" } +tikv-jemallocator = { workspace = true } [[bench]] name = "filter" diff --git a/src/batch/benches/expand.rs b/src/batch/benches/expand.rs index c300408bd8882..428b41c3bed5e 100644 --- a/src/batch/benches/expand.rs +++ b/src/batch/benches/expand.rs @@ -15,12 +15,12 @@ pub mod utils; use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use risingwave_batch::executor::{BoxedExecutor, ExpandExecutor}; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::types::DataType; use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_expand_executor( column_subsets: Vec>, diff --git a/src/batch/benches/filter.rs b/src/batch/benches/filter.rs index 28169ba6bcab5..1c2ff9c062a0e 100644 --- a/src/batch/benches/filter.rs +++ b/src/batch/benches/filter.rs @@ -16,13 +16,13 @@ pub mod utils; use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use risingwave_batch::executor::{BoxedExecutor, FilterExecutor}; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::types::DataType; use risingwave_expr::expr::build_from_pretty; 
use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_filter_executor(chunk_size: usize, chunk_num: usize) -> BoxedExecutor { const CHUNK_SIZE: usize = 1024; diff --git a/src/batch/benches/hash_agg.rs b/src/batch/benches/hash_agg.rs index e5a561e03a535..6e45b5317ef6f 100644 --- a/src/batch/benches/hash_agg.rs +++ b/src/batch/benches/hash_agg.rs @@ -21,13 +21,13 @@ use risingwave_batch::task::ShutdownToken; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; -use risingwave_common::{enable_jemalloc_on_unix, hash}; -use risingwave_expr::agg::{AggCall, AggKind}; +use risingwave_common::{enable_jemalloc, hash}; +use risingwave_expr::aggregate::{AggCall, AggKind}; use risingwave_pb::expr::{PbAggCall, PbInputRef}; use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_agg_call( input_schema: &Schema, diff --git a/src/batch/benches/hash_join.rs b/src/batch/benches/hash_join.rs index b594c9da1642f..45d8bca36fb0a 100644 --- a/src/batch/benches/hash_join.rs +++ b/src/batch/benches/hash_join.rs @@ -22,11 +22,11 @@ use risingwave_batch::task::ShutdownToken; use risingwave_common::catalog::schema_test_utils::field_n; use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; -use risingwave_common::{enable_jemalloc_on_unix, hash}; +use risingwave_common::{enable_jemalloc, hash}; use risingwave_expr::expr::build_from_pretty; use utils::bench_join; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_hash_join_executor( join_type: JoinType, diff --git a/src/batch/benches/limit.rs b/src/batch/benches/limit.rs index 3096a8cbea6eb..4905e35ff8beb 100644 --- a/src/batch/benches/limit.rs +++ b/src/batch/benches/limit.rs @@ -16,12 +16,12 @@ pub mod utils; use criterion::{criterion_group, criterion_main, BatchSize, 
BenchmarkId, Criterion}; use risingwave_batch::executor::{BoxedExecutor, LimitExecutor}; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::types::DataType; use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_limit_executor( chunk_size: usize, diff --git a/src/batch/benches/nested_loop_join.rs b/src/batch/benches/nested_loop_join.rs index b5fc33307c0ef..afbf2b8f17e4f 100644 --- a/src/batch/benches/nested_loop_join.rs +++ b/src/batch/benches/nested_loop_join.rs @@ -16,13 +16,13 @@ pub mod utils; use criterion::{criterion_group, criterion_main, Criterion}; use risingwave_batch::executor::{BoxedExecutor, JoinType, NestedLoopJoinExecutor}; use risingwave_batch::task::ShutdownToken; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; use risingwave_expr::expr::build_from_pretty; use utils::{bench_join, create_input}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_nested_loop_join_executor( join_type: JoinType, diff --git a/src/batch/benches/sort.rs b/src/batch/benches/sort.rs index 1c089790f5f7a..022d9c8e0a974 100644 --- a/src/batch/benches/sort.rs +++ b/src/batch/benches/sort.rs @@ -16,14 +16,14 @@ pub mod utils; use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use risingwave_batch::executor::{BoxedExecutor, SortExecutor}; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_order_by_executor( chunk_size: usize, diff --git 
a/src/batch/benches/top_n.rs b/src/batch/benches/top_n.rs index a02a5b401860b..a24dc4e741446 100644 --- a/src/batch/benches/top_n.rs +++ b/src/batch/benches/top_n.rs @@ -16,14 +16,14 @@ pub mod utils; use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use risingwave_batch::executor::{BoxedExecutor, TopNExecutor}; -use risingwave_common::enable_jemalloc_on_unix; +use risingwave_common::enable_jemalloc; use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use tokio::runtime::Runtime; use utils::{create_input, execute_executor}; -enable_jemalloc_on_unix!(); +enable_jemalloc!(); fn create_top_n_executor( chunk_size: usize, diff --git a/src/batch/src/exchange_source.rs b/src/batch/src/exchange_source.rs index 5c34922a7c6df..e5aaa295e5dc2 100644 --- a/src/batch/src/exchange_source.rs +++ b/src/batch/src/exchange_source.rs @@ -16,6 +16,7 @@ use std::fmt::Debug; use std::future::Future; use risingwave_common::array::DataChunk; +use risingwave_common::error::Result; use crate::execution::grpc_exchange::GrpcExchangeSource; use crate::execution::local_exchange::LocalExchangeSource; @@ -24,11 +25,7 @@ use crate::task::TaskId; /// Each `ExchangeSource` maps to one task, it takes the execution result from task chunk by chunk. pub trait ExchangeSource: Send + Debug { - type TakeDataFuture<'a>: Future>> - + 'a - where - Self: 'a; - fn take_data(&mut self) -> Self::TakeDataFuture<'_>; + fn take_data(&mut self) -> impl Future>> + '_; /// Get upstream task id. 
fn get_task_id(&self) -> TaskId; @@ -42,9 +39,7 @@ pub enum ExchangeSourceImpl { } impl ExchangeSourceImpl { - pub(crate) async fn take_data( - &mut self, - ) -> risingwave_common::error::Result> { + pub(crate) async fn take_data(&mut self) -> Result> { match self { ExchangeSourceImpl::Grpc(grpc) => grpc.take_data().await, ExchangeSourceImpl::Local(local) => local.take_data().await, diff --git a/src/batch/src/execution/grpc_exchange.rs b/src/batch/src/execution/grpc_exchange.rs index 21705ab634c29..1ec24e5b440fb 100644 --- a/src/batch/src/execution/grpc_exchange.rs +++ b/src/batch/src/execution/grpc_exchange.rs @@ -13,7 +13,6 @@ // limitations under the License. use std::fmt::{Debug, Formatter}; -use std::future::Future; use futures::StreamExt; use risingwave_common::array::DataChunk; @@ -73,26 +72,22 @@ impl Debug for GrpcExchangeSource { } impl ExchangeSource for GrpcExchangeSource { - type TakeDataFuture<'a> = impl Future>> + 'a; - - fn take_data(&mut self) -> Self::TakeDataFuture<'_> { - async { - let res = match self.stream.next().await { - None => { - return Ok(None); - } - Some(r) => r, - }; - let task_data = res?; - let data = DataChunk::from_protobuf(task_data.get_record_batch()?)?.compact(); - trace!( - "Receiver taskOutput = {:?}, data = {:?}", - self.task_output_id, - data - ); + async fn take_data(&mut self) -> Result> { + let res = match self.stream.next().await { + None => { + return Ok(None); + } + Some(r) => r, + }; + let task_data = res?; + let data = DataChunk::from_protobuf(task_data.get_record_batch()?)?.compact(); + trace!( + "Receiver taskOutput = {:?}, data = {:?}", + self.task_output_id, + data + ); - Ok(Some(data)) - } + Ok(Some(data)) } fn get_task_id(&self) -> TaskId { diff --git a/src/batch/src/execution/local_exchange.rs b/src/batch/src/execution/local_exchange.rs index b28687c5d25c2..c08bd6a7ef145 100644 --- a/src/batch/src/execution/local_exchange.rs +++ b/src/batch/src/execution/local_exchange.rs @@ -13,7 +13,6 @@ // limitations 
under the License. use std::fmt::{Debug, Formatter}; -use std::future::Future; use risingwave_common::array::DataChunk; use risingwave_common::error::Result; @@ -52,23 +51,19 @@ impl Debug for LocalExchangeSource { } impl ExchangeSource for LocalExchangeSource { - type TakeDataFuture<'a> = impl Future>> + 'a; - - fn take_data(&mut self) -> Self::TakeDataFuture<'_> { - async { - let ret = self.task_output.direct_take_data().await?; - if let Some(data) = ret { - let data = data.compact(); - trace!( - "Receiver task: {:?}, source task output: {:?}, data: {:?}", - self.task_id, - self.task_output.id(), - data - ); - Ok(Some(data)) - } else { - Ok(None) - } + async fn take_data(&mut self) -> Result> { + let ret = self.task_output.direct_take_data().await?; + if let Some(data) = ret { + let data = data.compact(); + trace!( + "Receiver task: {:?}, source task output: {:?}, data: {:?}", + self.task_id, + self.task_output.id(), + data + ); + Ok(Some(data)) + } else { + Ok(None) } } diff --git a/src/batch/src/executor/aggregation/distinct.rs b/src/batch/src/executor/aggregation/distinct.rs index 6723cd9b926ac..c2844d558acf4 100644 --- a/src/batch/src/executor/aggregation/distinct.rs +++ b/src/batch/src/executor/aggregation/distinct.rs @@ -20,7 +20,7 @@ use risingwave_common::buffer::BitmapBuilder; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::{DataType, Datum}; -use risingwave_expr::agg::{ +use risingwave_expr::aggregate::{ AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction, }; use risingwave_expr::Result; @@ -83,7 +83,7 @@ impl AggregateFunction for Distinct { let state = state.downcast_mut::(); let mut bitmap_builder = BitmapBuilder::with_capacity(input.capacity()); - bitmap_builder.append_bitmap(&input.data_chunk().vis().to_bitmap()); + bitmap_builder.append_bitmap(input.data_chunk().visibility()); for row_id in range.clone() { let (row_ref, vis) = 
input.data_chunk().row_at(row_id); let row = row_ref.to_owned_row(); @@ -94,7 +94,7 @@ impl AggregateFunction for Distinct { } bitmap_builder.set(row_id, b); } - let input = input.with_visibility(bitmap_builder.finish().into()); + let input = input.clone_with_vis(bitmap_builder.finish()); self.inner .update_range(&mut state.inner, &input, range) .await @@ -112,7 +112,7 @@ mod tests { use risingwave_common::array::StreamChunk; use risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::{Datum, Decimal}; - use risingwave_expr::agg::AggCall; + use risingwave_expr::aggregate::AggCall; use super::super::build; diff --git a/src/batch/src/executor/aggregation/filter.rs b/src/batch/src/executor/aggregation/filter.rs index 490bea5c342b3..9cfbeabffe417 100644 --- a/src/batch/src/executor/aggregation/filter.rs +++ b/src/batch/src/executor/aggregation/filter.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use risingwave_common::array::StreamChunk; use risingwave_common::types::{DataType, Datum}; -use risingwave_expr::agg::{AggregateFunction, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggregateFunction, AggregateState, BoxedAggregateFunction}; use risingwave_expr::expr::Expression; use risingwave_expr::Result; @@ -62,7 +62,7 @@ impl AggregateFunction for Filter { .as_bool() .to_bitmap(); let mut input1 = input.clone(); - input1.set_vis(input.vis() & &bitmap); + input1.set_visibility(input.visibility() & &bitmap); self.inner.update_range(state, &input1, range).await } @@ -74,8 +74,8 @@ impl AggregateFunction for Filter { #[cfg(test)] mod tests { use risingwave_common::test_prelude::StreamChunkTestExt; - use risingwave_expr::agg::{build, AggCall}; - use risingwave_expr::expr::{build_from_pretty, Expression, LiteralExpression}; + use risingwave_expr::aggregate::{build_append_only, AggCall}; + use risingwave_expr::expr::{build_from_pretty, ExpressionBoxExt, LiteralExpression}; use super::*; @@ -84,7 +84,7 @@ mod tests { let 
condition = LiteralExpression::new(DataType::Boolean, Some(true.into())).boxed(); let agg = Filter::new( condition.into(), - build(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), + build_append_only(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), ); let mut state = agg.create_state(); @@ -113,7 +113,7 @@ mod tests { let expr = build_from_pretty("(greater_than:boolean $0:int8 5:int8)"); let agg = Filter::new( expr.into(), - build(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), + build_append_only(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), ); let mut state = agg.create_state(); @@ -145,7 +145,7 @@ mod tests { let expr = build_from_pretty("(equal:boolean $0:int8 null:int8)"); let agg = Filter::new( expr.into(), - build(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), + build_append_only(&AggCall::from_pretty("(count:int8 $0:int8)")).unwrap(), ); let mut state = agg.create_state(); diff --git a/src/batch/src/executor/aggregation/mod.rs b/src/batch/src/executor/aggregation/mod.rs index 64191efa14dd9..e756b126f013b 100644 --- a/src/batch/src/executor/aggregation/mod.rs +++ b/src/batch/src/executor/aggregation/mod.rs @@ -20,7 +20,7 @@ mod filter; mod orderby; mod projection; -use risingwave_expr::agg::{self, AggCall, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{build_append_only, AggCall, BoxedAggregateFunction}; use risingwave_expr::Result; use self::distinct::Distinct; @@ -30,7 +30,7 @@ use self::projection::Projection; /// Build an `BoxedAggregateFunction` from `AggCall`. 
pub fn build(agg: &AggCall) -> Result { - let mut aggregator = agg::build(agg)?; + let mut aggregator = build_append_only(agg)?; if agg.distinct { aggregator = Box::new(Distinct::new(aggregator)); diff --git a/src/batch/src/executor/aggregation/orderby.rs b/src/batch/src/executor/aggregation/orderby.rs index 47ef7189bb6c4..427fa15688ca3 100644 --- a/src/batch/src/executor/aggregation/orderby.rs +++ b/src/batch/src/executor/aggregation/orderby.rs @@ -22,7 +22,7 @@ use risingwave_common::types::{DataType, Datum}; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; use risingwave_common::util::memcmp_encoding; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_expr::agg::{ +use risingwave_expr::aggregate::{ AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction, }; use risingwave_expr::{ExprError, Result}; @@ -151,7 +151,7 @@ impl AggregateFunction for ProjectionOrderBy { mod tests { use risingwave_common::array::{ListValue, StreamChunk}; use risingwave_common::test_prelude::StreamChunkTestExt; - use risingwave_expr::agg::AggCall; + use risingwave_expr::aggregate::AggCall; use super::super::build; diff --git a/src/batch/src/executor/aggregation/projection.rs b/src/batch/src/executor/aggregation/projection.rs index 890d8da58f1a8..00343891e9ae6 100644 --- a/src/batch/src/executor/aggregation/projection.rs +++ b/src/batch/src/executor/aggregation/projection.rs @@ -16,7 +16,7 @@ use std::ops::Range; use risingwave_common::array::StreamChunk; use risingwave_common::types::{DataType, Datum}; -use risingwave_expr::agg::{AggregateFunction, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggregateFunction, AggregateState, BoxedAggregateFunction}; use risingwave_expr::Result; pub struct Projection { diff --git a/src/batch/src/executor/group_top_n.rs b/src/batch/src/executor/group_top_n.rs index 32f8a8b73c61e..1b76bb6e6e997 100644 --- a/src/batch/src/executor/group_top_n.rs +++ 
b/src/batch/src/executor/group_top_n.rs @@ -196,14 +196,19 @@ impl GroupTopNExecutor { #[for_await] for chunk in self.child.execute() { - let chunk = Arc::new(chunk?.compact()); + let chunk = Arc::new(chunk?); let keys = K::build(self.group_key.as_slice(), &chunk)?; - for (row_id, (encoded_row, key)) in encode_chunk(&chunk, &self.column_orders)? - .into_iter() - .zip_eq_fast(keys.into_iter()) - .enumerate() + for (row_id, ((encoded_row, key), visible)) in + encode_chunk(&chunk, &self.column_orders)? + .into_iter() + .zip_eq_fast(keys.into_iter()) + .zip_eq_fast(chunk.visibility().iter()) + .enumerate() { + if !visible { + continue; + } let heap = groups.entry(key).or_insert_with(|| { TopNHeap::new( self.limit, diff --git a/src/batch/src/executor/hash_agg.rs b/src/batch/src/executor/hash_agg.rs index 283a4caf4f80c..03ce86d475620 100644 --- a/src/batch/src/executor/hash_agg.rs +++ b/src/batch/src/executor/hash_agg.rs @@ -24,7 +24,7 @@ use risingwave_common::hash::{HashKey, HashKeyDispatcher, PrecomputedBuildHasher use risingwave_common::memory::MemoryContext; use risingwave_common::types::DataType; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{AggCall, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggCall, AggregateState, BoxedAggregateFunction}; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::HashAggNode; @@ -223,10 +223,17 @@ impl HashAggExecutor { // consume all chunks to compute the agg result #[for_await] for chunk in self.child.execute() { - let chunk = StreamChunk::from(chunk?.compact()); + let chunk = StreamChunk::from(chunk?); let keys = K::build(self.group_key_columns.as_slice(), &chunk)?; let mut memory_usage_diff = 0; - for (row_id, key) in keys.into_iter().enumerate() { + for (row_id, (key, visible)) in keys + .into_iter() + .zip_eq_fast(chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } let mut new_group = false; let states = 
groups.entry(key).or_insert_with(|| { new_group = true; diff --git a/src/batch/src/executor/hop_window.rs b/src/batch/src/executor/hop_window.rs index 06e257456dd5d..01ba275e730ee 100644 --- a/src/batch/src/executor/hop_window.rs +++ b/src/batch/src/executor/hop_window.rs @@ -16,7 +16,7 @@ use std::num::NonZeroUsize; use futures_async_stream::try_stream; use itertools::Itertools; -use risingwave_common::array::{DataChunk, Vis}; +use risingwave_common::array::DataChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::error::{Result, RwError}; use risingwave_common::types::{DataType, Interval}; @@ -173,7 +173,7 @@ impl HopWindowExecutor { #[for_await] for data_chunk in child.execute() { let data_chunk = data_chunk?; - assert!(matches!(data_chunk.vis(), Vis::Compact(_))); + assert!(data_chunk.is_compacted()); let len = data_chunk.cardinality(); for i in 0..units { let window_start_col = if output_indices.contains(&window_start_col_index) { @@ -239,7 +239,7 @@ mod tests { 6 2 ^10:42:00 7 1 ^10:51:00 8 3 ^11:02:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ); let mut mock_executor = MockExecutor::new(schema.clone()); mock_executor.add(chunk); @@ -326,7 +326,7 @@ mod tests { 6 2 ^10:42:00 ^10:14:00 ^10:44:00 7 1 ^10:51:00 ^10:29:00 ^10:59:00 8 3 ^11:02:00 ^10:44:00 ^11:14:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); assert_eq!( @@ -341,7 +341,7 @@ mod tests { 6 2 ^10:42:00 ^10:14:00 ^10:44:00 7 1 ^10:51:00 ^10:29:00 ^10:59:00 8 3 ^11:02:00 ^10:44:00 ^11:14:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); } @@ -371,7 +371,7 @@ mod tests { 6 2 ^10:42:00 ^10:15:00 ^10:45:00 7 1 ^10:51:00 ^10:30:00 ^11:00:00 8 3 ^11:02:00 ^10:45:00 ^11:15:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); @@ -388,7 +388,7 @@ mod tests { 6 2 ^10:42:00 ^10:30:00 ^11:00:00 7 1 ^10:51:00 ^10:45:00 ^11:15:00 8 3 ^11:02:00 ^11:00:00 ^11:30:00" - .replace('^', "2022-2-2T"), + 
.replace('^', "2022-02-02T"), ) ); } @@ -415,7 +415,7 @@ mod tests { 2 ^10:15:00 ^10:45:00 ^10:42:00 1 ^10:30:00 ^11:00:00 ^10:51:00 3 ^10:45:00 ^11:15:00 ^11:02:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); @@ -432,7 +432,7 @@ mod tests { 2 ^10:30:00 ^11:00:00 ^10:42:00 1 ^10:45:00 ^11:15:00 ^10:51:00 3 ^11:00:00 ^11:30:00 ^11:02:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); } diff --git a/src/batch/src/executor/insert.rs b/src/batch/src/executor/insert.rs index e0f4816e9a4bd..28e8dda8a5979 100644 --- a/src/batch/src/executor/insert.rs +++ b/src/batch/src/executor/insert.rs @@ -157,8 +157,7 @@ impl InsertExecutor { columns.insert(row_id_index, Arc::new(row_id_col.into())) } - let stream_chunk = - StreamChunk::new(vec![Op::Insert; cap], columns, vis.into_visibility()); + let stream_chunk = StreamChunk::with_visibility(vec![Op::Insert; cap], columns, vis); #[cfg(debug_assertions)] table_dml_handle.check_chunk_schema(&stream_chunk); diff --git a/src/batch/src/executor/join/hash_join.rs b/src/batch/src/executor/join/hash_join.rs index a1badeadf3873..0f5a0788b23ec 100644 --- a/src/batch/src/executor/join/hash_join.rs +++ b/src/batch/src/executor/join/hash_join.rs @@ -231,7 +231,7 @@ impl HashJoinExecutor { let mut build_row_count = 0; #[for_await] for build_chunk in self.build_side_source.execute() { - let build_chunk = build_chunk?.compact(); + let build_chunk = build_chunk?; if build_chunk.cardinality() > 0 { build_row_count += build_chunk.cardinality(); self.mem_ctx.add(build_chunk.estimated_heap_size() as i64); @@ -252,8 +252,15 @@ impl HashJoinExecutor { for (build_chunk_id, build_chunk) in build_side.iter().enumerate() { let build_keys = K::build(&self.build_key_idxs, build_chunk)?; - for (build_row_id, build_key) in build_keys.into_iter().enumerate() { + for (build_row_id, (build_key, visible)) in build_keys + .into_iter() + .zip_eq_fast(build_chunk.visibility().iter()) + .enumerate() + { 
self.shutdown_rx.check()?; + if !visible { + continue; + } // Only insert key to hash map if it is consistent with the null safe restriction. if build_key.null_bitmap().is_subset(&null_matched) { let row_id = RowId::new(build_chunk_id, build_row_id); @@ -348,7 +355,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -404,7 +418,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { for build_row_id in next_build_row_with_same_key.row_id_iter(Some(*first_matched_build_row_id)) @@ -466,7 +487,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } non_equi_state.found_matched = false; non_equi_state .first_output_row_id @@ -537,7 +565,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - 
for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } shutdown_rx.check()?; if !ANTI_JOIN { if hash_map.get(probe_key).is_some() { @@ -594,7 +629,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } non_equi_state .first_output_row_id .push(chunk_builder.buffered_count()); @@ -662,7 +704,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } non_equi_state.found_matched = false; if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { non_equi_state @@ -737,7 +786,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -795,7 +851,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, 
&probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -860,7 +923,13 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for probe_key in &probe_keys { + for (probe_key, visible) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + { + if !visible { + continue; + } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -908,7 +977,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -974,7 +1050,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { for build_row_id in next_build_row_with_same_key.row_id_iter(Some(*first_matched_build_row_id)) @@ -1049,7 +1132,14 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let 
probe_keys = K::build(&probe_key_idxs, &probe_chunk)?; - for (probe_row_id, probe_key) in probe_keys.iter().enumerate() { + for (probe_row_id, (probe_key, visible)) in probe_keys + .iter() + .zip_eq_fast(probe_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } left_non_equi_state.found_matched = false; if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { left_non_equi_state @@ -1905,8 +1995,8 @@ mod tests { } fn is_data_chunk_eq(left: &DataChunk, right: &DataChunk) -> bool { - assert!(left.visibility().is_none()); - assert!(right.visibility().is_none()); + assert!(left.is_compacted()); + assert!(right.is_compacted()); if left.cardinality() != right.cardinality() { return false; diff --git a/src/batch/src/executor/join/local_lookup_join.rs b/src/batch/src/executor/join/local_lookup_join.rs index af2bd80f24198..31d101c1ecfa3 100644 --- a/src/batch/src/executor/join/local_lookup_join.rs +++ b/src/batch/src/executor/join/local_lookup_join.rs @@ -692,7 +692,7 @@ mod tests { 2 5.5 2 5.5 2 8.4 2 5.5", ); - let condition = build_from_pretty("(less_than:boolean (cast:float4 5:int4) $3:float4)"); + let condition = build_from_pretty("(less_than:boolean 5:float4 $3:float4)"); do_test(JoinType::Inner, Some(condition), false, expected).await; } @@ -709,7 +709,7 @@ mod tests { 5 9.1 . . . . . .", ); - let condition = build_from_pretty("(less_than:boolean (cast:float4 5:int4) $3:float4)"); + let condition = build_from_pretty("(less_than:boolean 5:float4 $3:float4)"); do_test(JoinType::LeftOuter, Some(condition), false, expected).await; } @@ -722,7 +722,7 @@ mod tests { 2 5.5 2 8.4", ); - let condition = build_from_pretty("(less_than:boolean (cast:float4 5:int4) $3:float4)"); + let condition = build_from_pretty("(less_than:boolean 5:float4 $3:float4)"); do_test(JoinType::LeftSemi, Some(condition), false, expected).await; } @@ -736,7 +736,7 @@ mod tests { 5 9.1 . 
.", ); - let condition = build_from_pretty("(less_than:boolean (cast:float4 5:int4) $3:float4)"); + let condition = build_from_pretty("(less_than:boolean 5:float4 $3:float4)"); do_test(JoinType::LeftAnti, Some(condition), false, expected).await; } diff --git a/src/batch/src/executor/join/lookup_join_base.rs b/src/batch/src/executor/join/lookup_join_base.rs index d60c399ef9941..3e754ab9f012f 100644 --- a/src/batch/src/executor/join/lookup_join_base.rs +++ b/src/batch/src/executor/join/lookup_join_base.rs @@ -25,6 +25,7 @@ use risingwave_common::memory::MemoryContext; use risingwave_common::row::Row; use risingwave_common::types::{DataType, ToOwnedDatum}; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; +use risingwave_common::util::iter_util::ZipEqFast; use risingwave_common::util::sort_util::{cmp_datum_iter, OrderType}; use risingwave_expr::expr::BoxedExpression; @@ -128,7 +129,7 @@ impl LookupJoinBase { let mut build_row_count = 0; #[for_await] for build_chunk in hash_join_build_side_input.execute() { - let build_chunk = build_chunk?.compact(); + let build_chunk = build_chunk?; if build_chunk.cardinality() > 0 { build_row_count += build_chunk.cardinality(); self.mem_ctx.add(build_chunk.estimated_heap_size() as i64); @@ -147,7 +148,14 @@ impl LookupJoinBase { for (build_chunk_id, build_chunk) in build_side.iter().enumerate() { let build_keys = K::build(&hash_join_build_side_key_idxs, build_chunk)?; - for (build_row_id, build_key) in build_keys.into_iter().enumerate() { + for (build_row_id, (build_key, visible)) in build_keys + .into_iter() + .zip_eq_fast(build_chunk.visibility().iter()) + .enumerate() + { + if !visible { + continue; + } // Only insert key to hash map if it is consistent with the null safe // restriction. 
if build_key.null_bitmap().is_subset(&null_matched) { diff --git a/src/batch/src/executor/join/mod.rs b/src/batch/src/executor/join/mod.rs index 996e45e26f031..320c4a27a037d 100644 --- a/src/batch/src/executor/join/mod.rs +++ b/src/batch/src/executor/join/mod.rs @@ -26,7 +26,7 @@ use itertools::Itertools; pub use local_lookup_join::*; pub use lookup_join_base::*; pub use nested_loop_join::*; -use risingwave_common::array::{DataChunk, RowRef, Vis}; +use risingwave_common::array::{DataChunk, RowRef}; use risingwave_common::error::Result; use risingwave_common::row::Row; use risingwave_common::types::{DataType, DatumRef}; @@ -124,10 +124,10 @@ fn concatenate(left: &DataChunk, right: &DataChunk) -> Result { concated_columns.extend_from_slice(left.columns()); concated_columns.extend_from_slice(right.columns()); // Only handle one side is constant row chunk: One of visibility must be None. - let vis = match (left.vis(), right.vis()) { - (Vis::Compact(_), _) => right.vis().clone(), - (_, Vis::Compact(_)) => left.vis().clone(), - (Vis::Bitmap(_), Vis::Bitmap(_)) => { + let vis = match (left.is_compacted(), right.is_compacted()) { + (true, _) => right.visibility().clone(), + (_, true) => left.visibility().clone(), + (false, false) => { return Err(BatchError::UnsupportedFunction( "The concatenate behaviour of two chunk with visibility is undefined".to_string(), ) @@ -176,7 +176,8 @@ fn convert_row_to_chunk( #[cfg(test)] mod tests { - use risingwave_common::array::{Array, ArrayBuilder, DataChunk, PrimitiveArrayBuilder, Vis}; + use risingwave_common::array::{Array, ArrayBuilder, DataChunk, PrimitiveArrayBuilder}; + use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::row::Row; use risingwave_common::types::{DataType, ScalarRefImpl}; @@ -196,20 +197,14 @@ mod tests { let arr = builder.finish(); columns.push(arr.into_ref()) } - let chunk1: DataChunk = DataChunk::new(columns.clone(), length); - let bool_vec = vec![true, 
false, true, false, false]; - let chunk2: DataChunk = DataChunk::new( - columns.clone(), - Vis::Bitmap((bool_vec.clone()).into_iter().collect()), - ); + let chunk1 = DataChunk::new(columns.clone(), length); + let visibility = Bitmap::from_bool_slice(&[true, false, true, false, false]); + let chunk2 = DataChunk::new(columns.clone(), visibility.clone()); let chunk = concatenate(&chunk1, &chunk2).unwrap(); assert_eq!(chunk.capacity(), chunk1.capacity()); assert_eq!(chunk.capacity(), chunk2.capacity()); assert_eq!(chunk.columns().len(), chunk1.columns().len() * 2); - assert_eq!( - chunk.visibility().cloned().unwrap(), - (bool_vec).into_iter().collect() - ); + assert_eq!(chunk.visibility(), &visibility); } /// Test the function of convert row into constant row chunk (one row repeat multiple times). diff --git a/src/batch/src/executor/join/nested_loop_join.rs b/src/batch/src/executor/join/nested_loop_join.rs index 21409e568ce6d..a52faf35dc724 100644 --- a/src/batch/src/executor/join/nested_loop_join.rs +++ b/src/batch/src/executor/join/nested_loop_join.rs @@ -389,7 +389,7 @@ impl NestedLoopJoinExecutor { .await?; if chunk.cardinality() > 0 { // chunk.visibility() must be Some(_) - matched = &matched | chunk.visibility().unwrap(); + matched = &matched | chunk.visibility(); for spilled in chunk_builder.append_chunk(chunk) { yield spilled } @@ -433,7 +433,7 @@ impl NestedLoopJoinExecutor { .await?; if chunk.cardinality() > 0 { // chunk.visibility() must be Some(_) - matched = &matched | chunk.visibility().unwrap(); + matched = &matched | chunk.visibility(); } } if ANTI_JOIN { @@ -475,7 +475,7 @@ impl NestedLoopJoinExecutor { .await?; if chunk.cardinality() > 0 { left_matched.set(left_row_idx, true); - right_matched = &right_matched | chunk.visibility().unwrap(); + right_matched = &right_matched | chunk.visibility(); for spilled in chunk_builder.append_chunk(chunk) { yield spilled } diff --git a/src/batch/src/executor/limit.rs b/src/batch/src/executor/limit.rs index 
970b9a2815828..7828974d64c03 100644 --- a/src/batch/src/executor/limit.rs +++ b/src/batch/src/executor/limit.rs @@ -91,8 +91,8 @@ impl LimitExecutor { } // process chunk let mut new_vis; - if let Some(old_vis) = data_chunk.visibility() { - new_vis = old_vis.iter().collect_vec(); + if !data_chunk.is_compacted() { + new_vis = data_chunk.visibility().iter().collect_vec(); for vis in new_vis.iter_mut().filter(|x| **x) { if skipped < self.offset { skipped += 1; diff --git a/src/batch/src/executor/project_set.rs b/src/batch/src/executor/project_set.rs index 670933a6bb50c..fa3dfac917e8a 100644 --- a/src/batch/src/executor/project_set.rs +++ b/src/batch/src/executor/project_set.rs @@ -171,7 +171,7 @@ mod tests { use risingwave_common::catalog::{Field, Schema}; use risingwave_common::test_prelude::*; use risingwave_common::types::DataType; - use risingwave_expr::expr::{Expression, InputRefExpression, LiteralExpression}; + use risingwave_expr::expr::{ExpressionBoxExt, InputRefExpression, LiteralExpression}; use risingwave_expr::table_function::repeat; use super::*; diff --git a/src/batch/src/executor/sort_agg.rs b/src/batch/src/executor/sort_agg.rs index 8a1427ffc6d6a..e8f07bc5fa5d9 100644 --- a/src/batch/src/executor/sort_agg.rs +++ b/src/batch/src/executor/sort_agg.rs @@ -20,7 +20,7 @@ use risingwave_common::array::{Array, ArrayBuilderImpl, ArrayImpl, DataChunk, St use risingwave_common::catalog::{Field, Schema}; use risingwave_common::error::{Result, RwError}; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{AggCall, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggCall, AggregateState, BoxedAggregateFunction}; use risingwave_expr::expr::{build_from_prost, BoxedExpression}; use risingwave_pb::batch_plan::plan_node::NodeBody; diff --git a/src/batch/src/executor/sort_over_window.rs b/src/batch/src/executor/sort_over_window.rs index f12ebc2452384..c8b6c7ef9388c 100644 --- a/src/batch/src/executor/sort_over_window.rs 
+++ b/src/batch/src/executor/sort_over_window.rs @@ -19,8 +19,9 @@ use risingwave_common::error::{Result, RwError}; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::function::window::WindowFuncCall; -use risingwave_expr::window_function::{create_window_state, StateKey, WindowStates}; +use risingwave_expr::window_function::{ + create_window_state, StateKey, WindowFuncCall, WindowStates, +}; use risingwave_pb::batch_plan::plan_node::NodeBody; use super::{BoxedDataChunkStream, BoxedExecutor, BoxedExecutorBuilder, Executor, ExecutorBuilder}; diff --git a/src/batch/src/executor/source.rs b/src/batch/src/executor/source.rs index 78733420c9158..8bf9fc5b7e610 100644 --- a/src/batch/src/executor/source.rs +++ b/src/batch/src/executor/source.rs @@ -30,7 +30,6 @@ use risingwave_connector::source::{ }; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_source::connector_source::ConnectorSource; -use risingwave_source::source_desc::extract_source_struct; use super::Executor; use crate::error::BatchError; @@ -71,9 +70,7 @@ impl BoxedExecutorBuilder for SourceExecutor { .map_err(|e| RwError::from(ConnectorError(e.into())))?; let info = source_node.get_info().unwrap(); - let source_struct = extract_source_struct(info)?; - let parser_config = - SpecificParserConfig::new(source_struct, info, &source_node.properties)?; + let parser_config = SpecificParserConfig::new(info, &source_node.properties)?; let columns: Vec<_> = source_node .columns @@ -174,7 +171,7 @@ impl SourceExecutor { fn covert_stream_chunk_to_batch_chunk(chunk: StreamChunk) -> Result { // chunk read from source must be compact - assert!(chunk.data_chunk().visibility().is_none()); + assert!(chunk.data_chunk().is_compacted()); if chunk.ops().iter().any(|op| *op != Op::Insert) { return Err(RwError::from(BatchError::Internal(anyhow!( diff --git 
a/src/batch/src/executor/test_utils.rs b/src/batch/src/executor/test_utils.rs index e6840ff3ea396..153184b9bdffc 100644 --- a/src/batch/src/executor/test_utils.rs +++ b/src/batch/src/executor/test_utils.rs @@ -13,7 +13,6 @@ // limitations under the License. use std::collections::VecDeque; -use std::future::Future; use assert_matches::assert_matches; use futures::StreamExt; @@ -220,8 +219,8 @@ pub async fn diff_executor_output(actual: BoxedExecutor, expect: BoxedExecutor) } fn is_data_chunk_eq(left: &DataChunk, right: &DataChunk) { - assert!(left.visibility().is_none()); - assert!(right.visibility().is_none()); + assert!(left.is_compacted()); + assert!(right.is_compacted()); assert_eq!( left.cardinality(), @@ -246,15 +245,11 @@ impl FakeExchangeSource { } impl ExchangeSource for FakeExchangeSource { - type TakeDataFuture<'a> = impl Future>> + 'a; - - fn take_data(&mut self) -> Self::TakeDataFuture<'_> { - async { - if let Some(chunk) = self.chunks.pop() { - Ok(chunk) - } else { - Ok(None) - } + async fn take_data(&mut self) -> Result> { + if let Some(chunk) = self.chunks.pop() { + Ok(chunk) + } else { + Ok(None) } } diff --git a/src/batch/src/executor/update.rs b/src/batch/src/executor/update.rs index cf71a0f97ab46..e3d2d9f03bf3a 100644 --- a/src/batch/src/executor/update.rs +++ b/src/batch/src/executor/update.rs @@ -169,7 +169,7 @@ impl UpdateExecutor { columns.push(column); } - DataChunk::new(columns, data_chunk.vis().clone()) + DataChunk::new(columns, data_chunk.visibility().clone()) }; if self.returning { diff --git a/src/batch/src/lib.rs b/src/batch/src/lib.rs index fe85ecab3223f..809c096eb49df 100644 --- a/src/batch/src/lib.rs +++ b/src/batch/src/lib.rs @@ -15,11 +15,10 @@ #![expect(dead_code)] #![allow(clippy::derive_partial_eq_without_eq)] #![feature(trait_alias)] -#![feature(binary_heap_drain_sorted)] #![feature(exact_size_is_empty)] #![feature(type_alias_impl_trait)] -#![cfg_attr(coverage, feature(no_coverage))] -#![feature(generators)] 
+#![cfg_attr(coverage, feature(coverage_attribute))] +#![feature(coroutines)] #![feature(proc_macro_hygiene, stmt_expr_attributes)] #![feature(iterator_try_collect)] #![feature(lint_reasons)] @@ -28,7 +27,6 @@ #![feature(let_chains)] #![feature(bound_map)] #![feature(int_roundings)] -#![feature(async_fn_in_trait)] #![feature(allocator_api)] #![feature(impl_trait_in_assoc_type)] #![feature(result_option_inspect)] @@ -47,3 +45,6 @@ pub mod task; extern crate tracing; #[macro_use] extern crate risingwave_common; + +#[cfg(test)] +risingwave_expr_impl::enable!(); diff --git a/src/batch/src/monitor/stats.rs b/src/batch/src/monitor/stats.rs index c9e9dddfa861d..f59e0217cd7a0 100644 --- a/src/batch/src/monitor/stats.rs +++ b/src/batch/src/monitor/stats.rs @@ -168,7 +168,9 @@ impl BatchTaskMetrics { macro_rules! remove { ($({ $metric:ident, $type:ty},)*) => { $( - self.$metric.remove_label_values(&labels).expect("It should not have duplicate task label."); + if let Err(err) = self.$metric.remove_label_values(&labels) { + warn!("Failed to remove label values: {:?}", err); + } )* }; } @@ -409,11 +411,8 @@ impl BatchMetricsWithTaskLabelsInner { impl Drop for BatchMetricsWithTaskLabelsInner { fn drop(&mut self) { - self.task_metrics.delete_task.lock().push(self.task_id()); - self.executor_metrics - .delete_task - .lock() - .push(self.task_id()); + self.task_metrics.add_delete_task(self.task_id()); + self.executor_metrics.add_delete_task(self.task_id()); } } diff --git a/src/batch/src/rpc/service/task_service.rs b/src/batch/src/rpc/service/task_service.rs index b49a023acb22b..fb60e352ec293 100644 --- a/src/batch/src/rpc/service/task_service.rs +++ b/src/batch/src/rpc/service/task_service.rs @@ -53,7 +53,7 @@ impl TaskService for BatchServiceImpl { type CreateTaskStream = ReceiverStream; type ExecuteStream = ReceiverStream; - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn create_task( &self, request: Request, @@ -97,7 +97,7 @@ impl TaskService 
for BatchServiceImpl { } } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn cancel_task( &self, req: Request, @@ -109,7 +109,7 @@ impl TaskService for BatchServiceImpl { Ok(Response::new(CancelTaskResponse { status: None })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn execute( &self, req: Request, diff --git a/src/batch/src/task/consistent_hash_shuffle_channel.rs b/src/batch/src/task/consistent_hash_shuffle_channel.rs index 336b8c98c2879..a9f3bc1b078ac 100644 --- a/src/batch/src/task/consistent_hash_shuffle_channel.rs +++ b/src/batch/src/task/consistent_hash_shuffle_channel.rs @@ -13,7 +13,6 @@ // limitations under the License. use std::fmt::{Debug, Formatter}; -use std::ops::BitAnd; use std::option::Option; use std::sync::Arc; @@ -89,12 +88,7 @@ fn generate_new_data_chunks( }); let mut res = Vec::with_capacity(output_count); for (sink_id, vis_map_vec) in vis_maps.into_iter().enumerate() { - let vis_map: Bitmap = vis_map_vec.into_iter().collect(); - let vis_map = if let Some(visibility) = chunk.visibility() { - vis_map.bitand(visibility) - } else { - vis_map - }; + let vis_map = Bitmap::from_bool_slice(&vis_map_vec) & chunk.visibility(); let new_data_chunk = chunk.with_visibility(vis_map); trace!( "send to sink:{}, cardinality:{}", diff --git a/src/batch/src/task/hash_shuffle_channel.rs b/src/batch/src/task/hash_shuffle_channel.rs index b07d5091249cd..8ea78a838f18a 100644 --- a/src/batch/src/task/hash_shuffle_channel.rs +++ b/src/batch/src/task/hash_shuffle_channel.rs @@ -13,7 +13,6 @@ // limitations under the License. 
use std::fmt::{Debug, Formatter}; -use std::ops::BitAnd; use std::option::Option; use std::sync::Arc; @@ -86,12 +85,7 @@ fn generate_new_data_chunks( }); let mut res = Vec::with_capacity(output_count); for (sink_id, vis_map_vec) in vis_maps.into_iter().enumerate() { - let vis_map: Bitmap = vis_map_vec.into_iter().collect(); - let vis_map = if let Some(visibility) = chunk.visibility() { - vis_map.bitand(visibility) - } else { - vis_map - }; + let vis_map = Bitmap::from_bool_slice(&vis_map_vec) & chunk.visibility(); let new_data_chunk = chunk.with_visibility(vis_map); trace!( "send to sink:{}, cardinality:{}", diff --git a/src/batch/src/task/task_execution.rs b/src/batch/src/task/task_execution.rs index 6bd83c5d62c67..445c71ee51d66 100644 --- a/src/batch/src/task/task_execution.rs +++ b/src/batch/src/task/task_execution.rs @@ -656,7 +656,7 @@ impl BatchTaskExecution { let error = error.map(Arc::new); *self.failure.lock() = error.clone().map(to_rw_error); - let err_str = error.as_ref().map(|e| format!("{:?}", e)); + let err_str = error.as_ref().map(|e| e.to_string()); if let Err(e) = sender.close(error).await { match e { SenderError => { diff --git a/src/batch/src/task/task_manager.rs b/src/batch/src/task/task_manager.rs index 858e9bc432b96..2aab5720d0fdf 100644 --- a/src/batch/src/task/task_manager.rs +++ b/src/batch/src/task/task_manager.rs @@ -194,7 +194,7 @@ impl BatchManager { pb_task_output_id: &PbTaskOutputId, ) -> Result<()> { let task_id = TaskOutputId::try_from(pb_task_output_id)?; - tracing::trace!(target: "events::compute::exchange", peer_addr = %peer_addr, from = ?task_id, "serve exchange RPC"); + tracing::debug!(target: "events::compute::exchange", peer_addr = %peer_addr, from = ?task_id, "serve exchange RPC"); let mut task_output = self.take_output(pb_task_output_id)?; self.runtime.spawn(async move { let mut writer = GrpcExchangeWriter::new(tx.clone()); @@ -409,6 +409,8 @@ mod tests { } #[tokio::test] + // see 
https://github.com/risingwavelabs/risingwave/issues/11979 + #[ignore] async fn test_task_cancel_for_busy_loop() { let manager = Arc::new(BatchManager::new( BatchConfig::default(), @@ -437,6 +439,8 @@ mod tests { } #[tokio::test] + // see https://github.com/risingwavelabs/risingwave/issues/11979 + #[ignore] async fn test_task_abort_for_busy_loop() { let manager = Arc::new(BatchManager::new( BatchConfig::default(), diff --git a/src/bench/Cargo.toml b/src/bench/Cargo.toml index d3c74e385a4fe..31b8f5ce7cc94 100644 --- a/src/bench/Cargo.toml +++ b/src/bench/Cargo.toml @@ -38,7 +38,7 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [ "signal", ] } tokio-stream = "0.1" -toml = "0.7" +toml = "0.8" tracing = "0.1" tracing-subscriber = "0.3.17" diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 46a33654d3545..8ba72d6a24af4 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -25,9 +25,13 @@ risingwave_common = { workspace = true } risingwave_compactor = { workspace = true } risingwave_compute = { workspace = true } risingwave_ctl = { workspace = true } +risingwave_expr_impl = { workspace = true } risingwave_frontend = { workspace = true } -risingwave_meta = { workspace = true } +risingwave_meta_node = { workspace = true } risingwave_rt = { workspace = true } +tikv-jemallocator = { workspace = true, features = [ + "unprefixed_malloc_on_supported_platforms", +] } tokio = { version = "0.2", package = "madsim-tokio", features = [ "rt", "rt-multi-thread", @@ -45,13 +49,6 @@ workspace-hack = { path = "../workspace-hack" } [target.'cfg(enable_task_local_alloc)'.dependencies] task_stats_alloc = { path = "../utils/task_stats_alloc" } -[target.'cfg(unix)'.dependencies] -tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git", features = [ - "profiling", - "stats", - "unprefixed_malloc_on_supported_platforms", -], rev = "b7f9f3" } - [[bin]] name = "frontend" path = "src/bin/frontend_node.rs" diff --git 
a/src/cmd/src/bin/compactor.rs b/src/cmd/src/bin/compactor.rs index 21b7db2405e2d..554168d8a6683 100644 --- a/src/cmd/src/bin/compactor.rs +++ b/src/cmd/src/bin/compactor.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] risingwave_cmd::main!(compactor); diff --git a/src/cmd/src/bin/compute_node.rs b/src/cmd/src/bin/compute_node.rs index 0bb1e5211ac57..a24d132b70b94 100644 --- a/src/cmd/src/bin/compute_node.rs +++ b/src/cmd/src/bin/compute_node.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] risingwave_cmd::main!(compute); diff --git a/src/cmd/src/bin/ctl.rs b/src/cmd/src/bin/ctl.rs index 38345c7a3fc2e..7b4c3132e747d 100644 --- a/src/cmd/src/bin/ctl.rs +++ b/src/cmd/src/bin/ctl.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] risingwave_cmd::main!(ctl); diff --git a/src/cmd/src/bin/frontend_node.rs b/src/cmd/src/bin/frontend_node.rs index 32d563be109fc..546bacbf1a901 100644 --- a/src/cmd/src/bin/frontend_node.rs +++ b/src/cmd/src/bin/frontend_node.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] risingwave_cmd::main!(frontend); diff --git a/src/cmd/src/bin/meta_node.rs b/src/cmd/src/bin/meta_node.rs index 032cc6bc28285..4bebfc5f915a2 100644 --- a/src/cmd/src/bin/meta_node.rs +++ b/src/cmd/src/bin/meta_node.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] risingwave_cmd::main!(meta); diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs index 48cbaaa0e63a2..93df94a63816a 100644 --- a/src/cmd/src/lib.rs +++ b/src/cmd/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(result_option_inspect)] // Copyright 2023 RisingWave Labs // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +17,7 @@ use risingwave_compactor::CompactorOpts; use risingwave_compute::ComputeNodeOpts; use risingwave_ctl::CliOpts as CtlOpts; use risingwave_frontend::FrontendOpts; -use risingwave_meta::MetaNodeOpts; +use risingwave_meta_node::MetaNodeOpts; use risingwave_rt::{init_risingwave_logger, main_okk, LoggerSettings}; /// Define the `main` function for a component. @@ -24,12 +25,12 @@ use risingwave_rt::{init_risingwave_logger, main_okk, LoggerSettings}; macro_rules! main { ($component:ident) => { #[cfg(enable_task_local_alloc)] - risingwave_common::enable_task_local_jemalloc_on_unix!(); + risingwave_common::enable_task_local_jemalloc!(); #[cfg(not(enable_task_local_alloc))] - risingwave_common::enable_jemalloc_on_unix!(); + risingwave_common::enable_jemalloc!(); - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] fn main() { let opts = clap::Parser::parse(); $crate::$component(opts); @@ -37,6 +38,8 @@ macro_rules! main { }; } +risingwave_expr_impl::enable!(); + // Entry point functions. 
pub fn compute(opts: ComputeNodeOpts) { @@ -46,7 +49,7 @@ pub fn compute(opts: ComputeNodeOpts) { pub fn meta(opts: MetaNodeOpts) { init_risingwave_logger(LoggerSettings::new("meta")); - main_okk(risingwave_meta::start(opts)); + main_okk(risingwave_meta_node::start(opts)); } pub fn frontend(opts: FrontendOpts) { @@ -70,5 +73,8 @@ pub fn ctl(opts: CtlOpts) { .build() .unwrap() .block_on(risingwave_ctl::start(opts)) + .inspect_err(|e| { + eprintln!("{:#?}", e); + }) .unwrap(); } diff --git a/src/cmd_all/Cargo.toml b/src/cmd_all/Cargo.toml index b6907cdebaeff..9a4b34c094196 100644 --- a/src/cmd_all/Cargo.toml +++ b/src/cmd_all/Cargo.toml @@ -29,13 +29,17 @@ risingwave_common = { workspace = true } risingwave_compactor = { workspace = true } risingwave_compute = { workspace = true } risingwave_ctl = { workspace = true } +risingwave_expr_impl = { workspace = true } risingwave_frontend = { workspace = true } -risingwave_meta = { workspace = true } +risingwave_meta_node = { workspace = true } risingwave_rt = { workspace = true } shell-words = "1.1.0" strum = "0.25" strum_macros = "0.25" tempfile = "3" +tikv-jemallocator = { workspace = true, features = [ + "unprefixed_malloc_on_supported_platforms", +] } tokio = { version = "0.2", package = "madsim-tokio", features = [ "rt", "rt-multi-thread", @@ -53,18 +57,15 @@ workspace-hack = { path = "../workspace-hack" } expect-test = "1" [build-dependencies] -vergen = { version = "8", default-features = false, features = ["build", "git", "gitcl"] } +vergen = { version = "8", default-features = false, features = [ + "build", + "git", + "gitcl", +] } [target.'cfg(enable_task_local_alloc)'.dependencies] task_stats_alloc = { path = "../utils/task_stats_alloc" } -[target.'cfg(unix)'.dependencies] -tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git", features = [ - "profiling", - "stats", - "unprefixed_malloc_on_supported_platforms", -], rev = "b7f9f3" } - [[bin]] name = "risingwave" path = 
"src/bin/risingwave.rs" diff --git a/src/cmd_all/scripts/e2e-full-standalone-demo.sh b/src/cmd_all/scripts/e2e-full-standalone-demo.sh index 95e903745e9fb..6c8c01740731b 100755 --- a/src/cmd_all/scripts/e2e-full-standalone-demo.sh +++ b/src/cmd_all/scripts/e2e-full-standalone-demo.sh @@ -62,16 +62,14 @@ echo "--- Starting standalone cluster" ./risedev standalone-demo-full >"$LOG_PREFIX"/standalone.log 2>&1 & STANDALONE_PID=$! -# Wait for rw cluster to finish startup -sleep 10 - -# Make sure the env file is present -set +e -while [ ! -f "$RW_PREFIX"/config/risedev-env ]; do - echo "Waiting for risedev-env to be configured." - sleep 1 -done -set -e +sleep 15 + +# FIXME: Integrate standalone into risedev, so we can reuse risedev-env functionality here. +cat << EOF > "$RW_PREFIX"/config/risedev-env +RW_META_ADDR="http://0.0.0.0:5690" +RW_FRONTEND_LISTEN_ADDRESS="0.0.0.0" +RW_FRONTEND_PORT="4566" +EOF echo "--- Setting up table" ./risedev psql -c " diff --git a/src/cmd_all/scripts/standalone-demo-dev.sh b/src/cmd_all/scripts/standalone-demo-dev.sh index 6a4c124ae74e9..87c1e0ab9b61b 100755 --- a/src/cmd_all/scripts/standalone-demo-dev.sh +++ b/src/cmd_all/scripts/standalone-demo-dev.sh @@ -22,7 +22,6 @@ cargo run -p risingwave_cmd_all \ --listen-addr 127.0.0.1:5688 \ --prometheus-listener-addr 127.0.0.1:1222 \ --advertise-addr 127.0.0.1:5688 \ - --metrics-level info \ --async-stack-trace verbose \ --connector-rpc-endpoint 127.0.0.1:50051 \ --parallelism 4 \ @@ -35,5 +34,9 @@ cargo run -p risingwave_cmd_all \ --advertise-addr 127.0.0.1:4566 \ --prometheus-listener-addr 127.0.0.1:2222 \ --health-check-listener-addr 127.0.0.1:6786 \ - --metrics-level info \ - --meta-addr http://127.0.0.1:5690" \ No newline at end of file + --meta-addr http://127.0.0.1:5690" \ + --compactor-opts=" \ + --listen-addr 127.0.0.1:6660 \ + --prometheus-listener-addr 127.0.0.1:1260 \ + --advertise-addr 127.0.0.1:6660 \ + --meta-address http://127.0.0.1:5690" \ No newline at end of file diff --git 
a/src/cmd_all/scripts/standalone-demo-full.sh b/src/cmd_all/scripts/standalone-demo-full.sh index 83358fd9f3704..46ca69b982593 100755 --- a/src/cmd_all/scripts/standalone-demo-full.sh +++ b/src/cmd_all/scripts/standalone-demo-full.sh @@ -29,7 +29,6 @@ start_standalone() { --listen-addr 127.0.0.1:5688 \ --prometheus-listener-addr 127.0.0.1:1222 \ --advertise-addr 127.0.0.1:5688 \ - --metrics-level info \ --async-stack-trace verbose \ --connector-rpc-endpoint 127.0.0.1:50051 \ --parallelism 4 \ @@ -42,8 +41,12 @@ start_standalone() { --advertise-addr 127.0.0.1:4566 \ --prometheus-listener-addr 127.0.0.1:2222 \ --health-check-listener-addr 127.0.0.1:6786 \ - --metrics-level info \ - --meta-addr http://127.0.0.1:5690" + --meta-addr http://127.0.0.1:5690" \ + --compactor-opts=" \ + --listen-addr 127.0.0.1:6660 \ + --prometheus-listener-addr 127.0.0.1:1260 \ + --advertise-addr 127.0.0.1:6660 \ + --meta-address http://127.0.0.1:5690" } start_standalone \ No newline at end of file diff --git a/src/cmd_all/src/README.md b/src/cmd_all/src/README.md index 5748d62e6780c..0284817b99a92 100644 --- a/src/cmd_all/src/README.md +++ b/src/cmd_all/src/README.md @@ -8,9 +8,23 @@ This mode is just for local development. It starts with an in-memory etcd store This mode is for production. It provides cli parameters to configure etcd and object store. It will spawn `meta`, `frontend` and `compute` node within a single process. -It will take cli parameters to configure etcd and object store, connector node, and the compactor node. -## Development Notes +You can omit options, and the corresponding node will not be started in the standalone process: -The `cmd_all` module directly calls the entry points functions of `meta`, `frontend` and `compute` modules. -It does so within a single process. \ No newline at end of file +```bash +standalone \ + --meta-opts="..." \ + --frontend-opts="..." + # --compute-opts="..." not provided, so it won't be started. 
+``` + +### Examples of using standalone mode + +You may run and reference the [demo script](../scripts/e2e-full-standalone-demo.sh), as an example. + +### Internals + +Standalone mode simply passes the options to the corresponding node, and starts them in the same process. + +For example `--meta-opts` is parsed, and then Meta Node's entrypoint, `risingwave_meta_node::start`, is called with the parsed options. +If any option is missing, the corresponding node will not be started. \ No newline at end of file diff --git a/src/cmd_all/src/bin/risingwave.rs b/src/cmd_all/src/bin/risingwave.rs index 5aeffeed7ed5a..b7693c6fa06a2 100644 --- a/src/cmd_all/src/bin/risingwave.rs +++ b/src/cmd_all/src/bin/risingwave.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] use std::str::FromStr; @@ -25,16 +25,16 @@ use risingwave_compactor::CompactorOpts; use risingwave_compute::ComputeNodeOpts; use risingwave_ctl::CliOpts as CtlOpts; use risingwave_frontend::FrontendOpts; -use risingwave_meta::MetaNodeOpts; +use risingwave_meta_node::MetaNodeOpts; use strum::IntoEnumIterator; use strum_macros::{Display, EnumIter, EnumString, IntoStaticStr}; use tracing::Level; #[cfg(enable_task_local_alloc)] -risingwave_common::enable_task_local_jemalloc_on_unix!(); +risingwave_common::enable_task_local_jemalloc!(); #[cfg(not(enable_task_local_alloc))] -risingwave_common::enable_jemalloc_on_unix!(); +risingwave_common::enable_jemalloc!(); const BINARY_NAME: &str = "risingwave"; const VERSION: &str = { @@ -158,7 +158,7 @@ impl Component { } } -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] fn main() -> Result<()> { let risingwave = || { command!(BINARY_NAME) diff --git a/src/cmd_all/src/lib.rs b/src/cmd_all/src/lib.rs index be6a4e1b70a29..c3d5e60f01a05 100644 --- a/src/cmd_all/src/lib.rs +++ 
b/src/cmd_all/src/lib.rs @@ -20,3 +20,5 @@ mod standalone; pub use playground::*; pub use standalone::*; + +risingwave_expr_impl::enable!(); diff --git a/src/cmd_all/src/playground.rs b/src/cmd_all/src/playground.rs index 9c7820b6dcbe2..76ca89be17c76 100644 --- a/src/cmd_all/src/playground.rs +++ b/src/cmd_all/src/playground.rs @@ -59,10 +59,8 @@ fn get_services(profile: &str) -> (Vec, bool) { "hummock_001", "--advertise-addr", "127.0.0.1:5690", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), - RisingWaveService::Compute(osstrs(["--connector-rpc-endpoint", "127.0.0.1:50051"])), + RisingWaveService::Compute(osstrs([])), RisingWaveService::Frontend(osstrs([])), RisingWaveService::ConnectorNode(osstrs([])), ], @@ -76,32 +74,24 @@ fn get_services(profile: &str) -> (Vec, bool) { "hummock+memory-shared", "--data-directory", "hummock_001", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Compute(osstrs([ "--listen-addr", "127.0.0.1:5687", "--parallelism", "4", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Compute(osstrs([ "--listen-addr", "127.0.0.1:5688", "--parallelism", "4", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Compute(osstrs([ "--listen-addr", "127.0.0.1:5689", "--parallelism", "4", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Frontend(osstrs([])), ], @@ -117,16 +107,12 @@ fn get_services(profile: &str) -> (Vec, bool) { "hummock+memory", "--data-directory", "hummock_001", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Compute(osstrs([ "--listen-addr", "0.0.0.0:5688", "--advertise-addr", "127.0.0.1:5688", - "--connector-rpc-endpoint", - "127.0.0.1:50051", ])), RisingWaveService::Frontend(osstrs([ "--listen-addr", @@ -173,9 +159,9 @@ pub async fn playground(opts: PlaygroundOpts) -> Result<()> { RisingWaveService::Meta(mut opts) => { opts.insert(0, "meta-node".into()); tracing::info!("starting meta-node thread with 
cli args: {:?}", opts); - let opts = risingwave_meta::MetaNodeOpts::parse_from(opts); + let opts = risingwave_meta_node::MetaNodeOpts::parse_from(opts); let _meta_handle = tokio::spawn(async move { - risingwave_meta::start(opts).await; + risingwave_meta_node::start(opts).await; tracing::warn!("meta is stopped, shutdown all nodes"); // As a playground, it's fine to just kill everything. if idle_exit { diff --git a/src/cmd_all/src/standalone.rs b/src/cmd_all/src/standalone.rs index bd0db110e2f8c..8ebe2c7112c49 100644 --- a/src/cmd_all/src/standalone.rs +++ b/src/cmd_all/src/standalone.rs @@ -14,82 +14,152 @@ use anyhow::Result; use clap::Parser; +use risingwave_compactor::CompactorOpts; +use risingwave_compute::ComputeNodeOpts; +use risingwave_frontend::FrontendOpts; +use risingwave_meta_node::MetaNodeOpts; use shell_words::split; use tokio::signal; -use crate::common::{osstrs, RisingWaveService}; +use crate::common::osstrs; -#[derive(Debug, Clone, Parser)] +#[derive(Eq, PartialOrd, PartialEq, Debug, Clone, Parser)] pub struct StandaloneOpts { /// Compute node options - #[clap(short, long, env = "STANDALONE_COMPUTE_OPTS", default_value = "")] - compute_opts: String, + /// If missing, compute node won't start + #[clap(short, long, env = "RW_STANDALONE_COMPUTE_OPTS")] + compute_opts: Option, - #[clap(short, long, env = "STANDALONE_META_OPTS", default_value = "")] + #[clap(short, long, env = "RW_STANDALONE_META_OPTS")] /// Meta node options - meta_opts: String, + /// If missing, meta node won't start + meta_opts: Option, - #[clap(short, long, env = "STANDALONE_FRONTEND_OPTS", default_value = "")] + #[clap(short, long, env = "RW_STANDALONE_FRONTEND_OPTS")] /// Frontend node options - frontend_opts: String, + /// If missing, frontend node won't start + frontend_opts: Option, + + #[clap(long, env = "RW_STANDALONE_COMPACTOR_OPTS")] + /// Compactor node options + /// If missing compactor node won't start + compactor_opts: Option, + + #[clap(long, env = 
"RW_STANDALONE_PROMETHEUS_LISTENER_ADDR")] + /// Prometheus listener address + /// If present, it will override prometheus listener address for + /// Frontend, Compute and Compactor nodes + prometheus_listener_addr: Option, + + #[clap(long, env = "RW_STANDALONE_CONFIG_PATH")] + /// Path to the config file + /// If present, it will override config path for + /// Frontend, Compute and Compactor nodes + config_path: Option, } -fn parse_opt_args(opts: &StandaloneOpts) -> (Vec, Vec, Vec) { - let meta_opts = split(&opts.meta_opts).unwrap(); - let compute_opts = split(&opts.compute_opts).unwrap(); - let frontend_opts = split(&opts.frontend_opts).unwrap(); - (meta_opts, compute_opts, frontend_opts) +#[derive(Debug)] +pub struct ParsedStandaloneOpts { + pub meta_opts: Option, + pub compute_opts: Option, + pub frontend_opts: Option, + pub compactor_opts: Option, } -fn get_services(opts: &StandaloneOpts) -> Vec { - let (meta_opts, compute_opts, frontend_opts) = parse_opt_args(opts); - let services = vec![ - RisingWaveService::Meta(osstrs(meta_opts)), - RisingWaveService::Compute(osstrs(compute_opts)), - RisingWaveService::Frontend(osstrs(frontend_opts)), - ]; - services +fn parse_opt_args(opts: &StandaloneOpts) -> ParsedStandaloneOpts { + let meta_opts = opts.meta_opts.as_ref().map(|s| { + let mut s = split(s).unwrap(); + s.insert(0, "meta-node".into()); + s + }); + let mut meta_opts = meta_opts.map(|o| MetaNodeOpts::parse_from(osstrs(o))); + + let compute_opts = opts.compute_opts.as_ref().map(|s| { + let mut s = split(s).unwrap(); + s.insert(0, "compute-node".into()); + s + }); + let mut compute_opts = compute_opts.map(|o| ComputeNodeOpts::parse_from(osstrs(o))); + + let frontend_opts = opts.frontend_opts.as_ref().map(|s| { + let mut s = split(s).unwrap(); + s.insert(0, "frontend-node".into()); + s + }); + let mut frontend_opts = frontend_opts.map(|o| FrontendOpts::parse_from(osstrs(o))); + + let compactor_opts = opts.compactor_opts.as_ref().map(|s| { + let mut s = 
split(s).unwrap(); + s.insert(0, "compactor-node".into()); + s + }); + let mut compactor_opts = compactor_opts.map(|o| CompactorOpts::parse_from(osstrs(o))); + + if let Some(config_path) = opts.config_path.as_ref() { + if let Some(meta_opts) = meta_opts.as_mut() { + meta_opts.config_path = config_path.clone(); + } + if let Some(compute_opts) = compute_opts.as_mut() { + compute_opts.config_path = config_path.clone(); + } + if let Some(frontend_opts) = frontend_opts.as_mut() { + frontend_opts.config_path = config_path.clone(); + } + if let Some(compactor_opts) = compactor_opts.as_mut() { + compactor_opts.config_path = config_path.clone(); + } + } + if let Some(prometheus_listener_addr) = opts.prometheus_listener_addr.as_ref() { + if let Some(compute_opts) = compute_opts.as_mut() { + compute_opts.prometheus_listener_addr = prometheus_listener_addr.clone(); + } + if let Some(frontend_opts) = frontend_opts.as_mut() { + frontend_opts.prometheus_listener_addr = prometheus_listener_addr.clone(); + } + if let Some(compactor_opts) = compactor_opts.as_mut() { + compactor_opts.prometheus_listener_addr = prometheus_listener_addr.clone(); + } + } + ParsedStandaloneOpts { + meta_opts, + compute_opts, + frontend_opts, + compactor_opts, + } } pub async fn standalone(opts: StandaloneOpts) -> Result<()> { tracing::info!("launching Risingwave in standalone mode"); - let services = get_services(&opts); - - for service in services { - match service { - RisingWaveService::Meta(mut opts) => { - opts.insert(0, "meta-node".into()); - tracing::info!("starting meta-node thread with cli args: {:?}", opts); - let opts = risingwave_meta::MetaNodeOpts::parse_from(opts); - let _meta_handle = tokio::spawn(async move { - risingwave_meta::start(opts).await; - tracing::warn!("meta is stopped, shutdown all nodes"); - }); - // wait for the service to be ready - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - RisingWaveService::Compute(mut opts) => { - opts.insert(0, 
"compute-node".into()); - tracing::info!("starting compute-node thread with cli args: {:?}", opts); - let opts = risingwave_compute::ComputeNodeOpts::parse_from(opts); - let _compute_handle = - tokio::spawn(async move { risingwave_compute::start(opts).await }); - } - RisingWaveService::Frontend(mut opts) => { - opts.insert(0, "frontend-node".into()); - tracing::info!("starting frontend-node thread with cli args: {:?}", opts); - let opts = risingwave_frontend::FrontendOpts::parse_from(opts); - let _frontend_handle = - tokio::spawn(async move { risingwave_frontend::start(opts).await }); - } - RisingWaveService::Compactor(_) => { - panic!("Compactor node unsupported in Risingwave standalone mode."); - } - RisingWaveService::ConnectorNode(_) => { - panic!("Connector node unsupported in Risingwave standalone mode."); - } - } + let ParsedStandaloneOpts { + meta_opts, + compute_opts, + frontend_opts, + compactor_opts, + } = parse_opt_args(&opts); + + if let Some(opts) = meta_opts { + tracing::info!("starting meta-node thread with cli args: {:?}", opts); + + let _meta_handle = tokio::spawn(async move { + risingwave_meta_node::start(opts).await; + tracing::warn!("meta is stopped, shutdown all nodes"); + }); + // wait for the service to be ready + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + if let Some(opts) = compute_opts { + tracing::info!("starting compute-node thread with cli args: {:?}", opts); + let _compute_handle = tokio::spawn(async move { risingwave_compute::start(opts).await }); + } + if let Some(opts) = frontend_opts { + tracing::info!("starting frontend-node thread with cli args: {:?}", opts); + let _frontend_handle = tokio::spawn(async move { risingwave_frontend::start(opts).await }); + } + if let Some(opts) = compactor_opts { + tracing::info!("starting compactor-node thread with cli args: {:?}", opts); + let _compactor_handle = + tokio::spawn(async move { risingwave_compactor::start(opts).await }); } // wait for log messages to be 
flushed @@ -122,32 +192,106 @@ mod test { #[test] fn test_parse_opt_args() { + // Test parsing into standalone-level opts. + let raw_opts = " +--compute-opts=--listen-addr 127.0.0.1:8000 --total-memory-bytes 34359738368 --parallelism 10 +--meta-opts=--advertise-addr 127.0.0.1:9999 --data-directory \"some path with spaces\" --listen-addr 127.0.0.1:8001 +--frontend-opts=--config-path=src/config/original.toml +--prometheus-listener-addr=127.0.0.1:1234 +--config-path=src/config/test.toml +"; + let actual = StandaloneOpts::parse_from(raw_opts.lines()); let opts = StandaloneOpts { - compute_opts: "--listen-address 127.0.0.1 --port 8000".into(), - meta_opts: "--data-dir \"some path with spaces\" --port 8001".into(), - frontend_opts: "--some-option".into(), + compute_opts: Some("--listen-addr 127.0.0.1:8000 --total-memory-bytes 34359738368 --parallelism 10".into()), + meta_opts: Some("--advertise-addr 127.0.0.1:9999 --data-directory \"some path with spaces\" --listen-addr 127.0.0.1:8001".into()), + frontend_opts: Some("--config-path=src/config/original.toml".into()), + compactor_opts: None, + prometheus_listener_addr: Some("127.0.0.1:1234".into()), + config_path: Some("src/config/test.toml".into()), }; + assert_eq!(actual, opts); + + // Test parsing into node-level opts. 
let actual = parse_opt_args(&opts); check( actual, expect![[r#" - ( - [ - "--data-dir", - "some path with spaces", - "--port", - "8001", - ], - [ - "--listen-address", - "127.0.0.1", - "--port", - "8000", - ], - [ - "--some-option", - ], - )"#]], + ParsedStandaloneOpts { + meta_opts: Some( + MetaNodeOpts { + vpc_id: None, + security_group_id: None, + listen_addr: "127.0.0.1:8001", + advertise_addr: "127.0.0.1:9999", + dashboard_host: None, + prometheus_host: None, + etcd_endpoints: "", + etcd_auth: false, + etcd_username: "", + etcd_password: "", + sql_endpoint: None, + dashboard_ui_path: None, + prometheus_endpoint: None, + connector_rpc_endpoint: None, + privatelink_endpoint_default_tags: None, + config_path: "src/config/test.toml", + backend: None, + barrier_interval_ms: None, + sstable_size_mb: None, + block_size_kb: None, + bloom_false_positive: None, + state_store: None, + data_directory: Some( + "some path with spaces", + ), + do_not_config_object_storage_lifecycle: None, + backup_storage_url: None, + backup_storage_directory: None, + object_store_streaming_read_timeout_ms: None, + object_store_streaming_upload_timeout_ms: None, + object_store_upload_timeout_ms: None, + object_store_read_timeout_ms: None, + heap_profiling_dir: None, + }, + ), + compute_opts: Some( + ComputeNodeOpts { + listen_addr: "127.0.0.1:8000", + advertise_addr: None, + prometheus_listener_addr: "127.0.0.1:1234", + meta_address: "http://127.0.0.1:5690", + connector_rpc_endpoint: None, + connector_rpc_sink_payload_format: None, + config_path: "src/config/test.toml", + total_memory_bytes: 34359738368, + parallelism: 10, + role: Both, + metrics_level: None, + data_file_cache_dir: None, + meta_file_cache_dir: None, + async_stack_trace: None, + heap_profiling_dir: None, + object_store_streaming_read_timeout_ms: None, + object_store_streaming_upload_timeout_ms: None, + object_store_upload_timeout_ms: None, + object_store_read_timeout_ms: None, + }, + ), + frontend_opts: Some( + FrontendOpts { 
+ listen_addr: "127.0.0.1:4566", + advertise_addr: None, + port: None, + meta_addr: "http://127.0.0.1:5690", + prometheus_listener_addr: "127.0.0.1:1234", + health_check_listener_addr: "127.0.0.1:6786", + config_path: "src/config/test.toml", + metrics_level: None, + enable_barrier_read: None, + }, + ), + compactor_opts: None, + }"#]], ); } } diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml index 83f85b8a3be6e..ddd1fe5a33cdb 100644 --- a/src/common/Cargo.toml +++ b/src/common/Cargo.toml @@ -18,9 +18,11 @@ anyhow = "1" arc-swap = "1" arrow-array = { workspace = true } arrow-buffer = { workspace = true } +arrow-cast = { workspace = true } arrow-schema = { workspace = true } async-trait = "0.1" auto_enums = "0.8" +auto_impl = "1" bitflags = "2" byteorder = "1" bytes = "1" @@ -62,8 +64,9 @@ postgres-types = { version = "0.2.6", features = [ "with-chrono-0_4", "with-serde_json-1", ] } +prehash = "1" prometheus = { version = "0.13" } -prost = "0.11" +prost = { workspace = true } rand = "0.8" regex = "1" reqwest = { version = "0.11", features = ["json"] } @@ -92,7 +95,7 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [ "time", "signal", ] } -toml = "0.7" +toml = "0.8" tonic = { workspace = true } tracing = "0.1" tracing-opentelemetry = "0.21" @@ -113,7 +116,7 @@ libc = "0.2" [target.'cfg(target_os = "macos")'.dependencies] darwin-libproc = { git = "https://github.com/risingwavelabs/darwin-libproc.git", rev = "a502be24bd0971463f5bcbfe035a248d8ba503b7" } -libc = "0.2.147" +libc = "0.2.148" mach2 = "0.4" [dev-dependencies] diff --git a/src/common/heap_profiling/Cargo.toml b/src/common/heap_profiling/Cargo.toml new file mode 100644 index 0000000000000..c7123eaac5817 --- /dev/null +++ b/src/common/heap_profiling/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "risingwave_common_heap_profiling" +version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +keywords = { workspace = true } +license = { workspace = true 
} +repository = { workspace = true } +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[package.metadata.cargo-machete] +ignored = ["workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["workspace-hack"] + +[dependencies] +anyhow = "1" +chrono = { version = "0.4", default-features = false, features = [ + "clock", + "std", +] } +parking_lot = "0.12" +risingwave_common = { workspace = true } +tikv-jemalloc-ctl = { workspace = true } +tokio = { version = "0.2", package = "madsim-tokio" } +tracing = "0.1" + +[lints] +workspace = true diff --git a/src/common/heap_profiling/src/jeprof.rs b/src/common/heap_profiling/src/jeprof.rs new file mode 100644 index 0000000000000..013632f32838e --- /dev/null +++ b/src/common/heap_profiling/src/jeprof.rs @@ -0,0 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::path::Path; +use std::process::Command; +use std::{env, fs}; + +use anyhow::anyhow; +use risingwave_common::error::Result; + +pub async fn run(profile_path: String, collapsed_path: String) -> Result<()> { + let executable_path = env::current_exe()?; + + let prof_cmd = move || { + Command::new("jeprof") + .arg("--collapsed") + .arg(executable_path) + .arg(Path::new(&profile_path)) + .output() + }; + match tokio::task::spawn_blocking(prof_cmd).await.unwrap() { + Ok(output) => { + if output.status.success() { + fs::write(Path::new(&collapsed_path), &output.stdout)?; + Ok(()) + } else { + Err(anyhow!( + "jeprof exit with an error. stdout: {}, stderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ) + .into()) + } + } + Err(e) => Err(e.into()), + } +} diff --git a/src/common/heap_profiling/src/lib.rs b/src/common/heap_profiling/src/lib.rs new file mode 100644 index 0000000000000..f6ffb66d836d7 --- /dev/null +++ b/src/common/heap_profiling/src/lib.rs @@ -0,0 +1,22 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub const MANUALLY_DUMP_SUFFIX: &str = "manual.heap"; +pub const AUTO_DUMP_SUFFIX: &str = "auto.heap"; +pub const COLLAPSED_SUFFIX: &str = "collapsed"; + +pub mod jeprof; +pub mod profiler; + +pub use profiler::HeapProfiler; diff --git a/src/common/heap_profiling/src/profiler.rs b/src/common/heap_profiling/src/profiler.rs new file mode 100644 index 0000000000000..49e81e8526241 --- /dev/null +++ b/src/common/heap_profiling/src/profiler.rs @@ -0,0 +1,120 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::ffi::CString; +use std::fs; +use std::path::Path; + +use parking_lot::Once; +use risingwave_common::config::HeapProfilingConfig; +use tikv_jemalloc_ctl::{ + epoch as jemalloc_epoch, opt as jemalloc_opt, prof as jemalloc_prof, stats as jemalloc_stats, +}; +use tokio::time::{self, Duration}; + +use super::AUTO_DUMP_SUFFIX; + +pub struct HeapProfiler { + config: HeapProfilingConfig, + threshold_auto_dump_heap_profile: usize, + jemalloc_dump_mib: jemalloc_prof::dump_mib, + jemalloc_allocated_mib: jemalloc_stats::allocated_mib, + jemalloc_epoch_mib: tikv_jemalloc_ctl::epoch_mib, + /// If jemalloc profiling is enabled + opt_prof: bool, +} + +impl HeapProfiler { + pub fn new(total_memory: usize, config: HeapProfilingConfig) -> Self { + let threshold_auto_dump_heap_profile = + (total_memory as f64 * config.threshold_auto as f64) as usize; + let jemalloc_dump_mib = jemalloc_prof::dump::mib().unwrap(); + let jemalloc_allocated_mib = jemalloc_stats::allocated::mib().unwrap(); + let jemalloc_epoch_mib = jemalloc_epoch::mib().unwrap(); + let opt_prof = jemalloc_opt::prof::read().unwrap(); + + Self { + config, + threshold_auto_dump_heap_profile, + jemalloc_dump_mib, + jemalloc_allocated_mib, + jemalloc_epoch_mib, + opt_prof, + } + } + + fn dump_heap_prof(&self, cur_used_memory_bytes: usize, prev_used_memory_bytes: usize) { + if !self.config.enable_auto { + return; + } + + if cur_used_memory_bytes > self.threshold_auto_dump_heap_profile + && prev_used_memory_bytes <= self.threshold_auto_dump_heap_profile + { + if !self.opt_prof { + tracing::info!("Cannot dump heap profile because Jemalloc prof is not enabled"); + return; + } + + let time_prefix = chrono::Local::now().format("%Y-%m-%d-%H-%M-%S"); + let file_name = format!("{}.{}", time_prefix, AUTO_DUMP_SUFFIX); + + let file_path = Path::new(&self.config.dir) + .join(&file_name) + .to_str() + .expect("file path is not valid utf8") + .to_owned(); + let file_path_c = CString::new(file_path).expect("0 byte in file path"); 
+ + // FIXME(yuhao): `unsafe` here because `jemalloc_dump_mib.write` requires static lifetime + if let Err(e) = self + .jemalloc_dump_mib + .write(unsafe { &*(file_path_c.as_c_str() as *const _) }) + { + tracing::warn!("Auto Jemalloc dump heap file failed! {:?}", e); + } else { + tracing::info!("Successfully dumped heap profile to {}", file_name); + } + } + } + + fn advance_jemalloc_epoch(&self, prev_jemalloc_allocated_bytes: usize) -> usize { + if let Err(e) = self.jemalloc_epoch_mib.advance() { + tracing::warn!("Jemalloc epoch advance failed! {:?}", e); + } + + self.jemalloc_allocated_mib.read().unwrap_or_else(|e| { + tracing::warn!("Jemalloc read allocated failed! {:?}", e); + prev_jemalloc_allocated_bytes + }) + } + + pub fn start(self) { + static START: Once = Once::new(); + START.call_once(|| { + fs::create_dir_all(&self.config.dir).unwrap(); + tokio::spawn(async move { + let mut interval = time::interval(Duration::from_millis(500)); + let mut prev_jemalloc_allocated_bytes = 0; + loop { + interval.tick().await; + let jemalloc_allocated_bytes = + self.advance_jemalloc_epoch(prev_jemalloc_allocated_bytes); + self.dump_heap_prof(jemalloc_allocated_bytes, prev_jemalloc_allocated_bytes); + prev_jemalloc_allocated_bytes = jemalloc_allocated_bytes; + } + }); + }) + } +} diff --git a/src/common/proc_macro/Cargo.toml b/src/common/proc_macro/Cargo.toml index b129cedc0e183..0f86bff6a19d2 100644 --- a/src/common/proc_macro/Cargo.toml +++ b/src/common/proc_macro/Cargo.toml @@ -23,7 +23,5 @@ proc-macro2 = { version = "1", default-features = false } syn = "1" bae = "0.1.7" -[target.'cfg(not(madsim))'.dependencies] -workspace-hack = { path = "../../workspace-hack" } [lints] workspace = true diff --git a/src/common/proc_macro/src/config.rs b/src/common/proc_macro/src/config.rs index 285834eb123cf..6e369fbad33eb 100644 --- a/src/common/proc_macro/src/config.rs +++ b/src/common/proc_macro/src/config.rs @@ -41,7 +41,7 @@ fn type_is_option(ty: &syn::Type) -> bool { false } 
-#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] pub fn produce_override_config(input: DeriveInput) -> TokenStream { let syn::Data::Struct(syn::DataStruct { fields, .. }) = input.data else { abort!(input, "Only struct is supported"); diff --git a/src/common/proc_macro/src/lib.rs b/src/common/proc_macro/src/lib.rs index 060ee1950624e..a11e407c6c053 100644 --- a/src/common/proc_macro/src/lib.rs +++ b/src/common/proc_macro/src/lib.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] use estimate_size::{ add_trait_bounds, extract_ignored_generics_list, has_nested_flag_attribute_list, @@ -52,7 +52,7 @@ mod estimate_size; /// } /// } /// ``` -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] #[proc_macro_derive(OverrideConfig, attributes(override_opts))] #[proc_macro_error] pub fn override_config(input: TokenStream) -> TokenStream { diff --git a/src/common/src/array/arrow.rs b/src/common/src/array/arrow.rs index 9b4165b608d98..36336ee5f819b 100644 --- a/src/common/src/array/arrow.rs +++ b/src/common/src/array/arrow.rs @@ -17,20 +17,54 @@ use std::fmt::Write; use arrow_array::Array as ArrowArray; -use arrow_schema::{Field, Schema, DECIMAL256_MAX_PRECISION}; +use arrow_cast::cast; +use arrow_schema::{Field, Schema, SchemaRef, DECIMAL256_MAX_PRECISION}; use chrono::{NaiveDateTime, NaiveTime}; use itertools::Itertools; use super::*; use crate::types::{Int256, StructType}; -use crate::util::iter_util::ZipEqDebug; +use crate::util::iter_util::{ZipEqDebug, ZipEqFast}; + +/// Converts RisingWave array to Arrow array with the schema. +/// This function will try to convert the array if the type is not same with the schema. 
+pub fn to_record_batch_with_schema( + schema: SchemaRef, + chunk: &DataChunk, +) -> Result { + if !chunk.is_compacted() { + let c = chunk.clone(); + return to_record_batch_with_schema(schema, &c.compact()); + } + let columns: Vec<_> = chunk + .columns() + .iter() + .zip_eq_fast(schema.fields().iter()) + .map(|(column, field)| { + let column: arrow_array::ArrayRef = column.as_ref().try_into()?; + if column.data_type() == field.data_type() { + Ok(column) + } else { + cast(&column, field.data_type()) + .map_err(|err| ArrayError::FromArrow(err.to_string())) + } + }) + .try_collect::<_, _, ArrayError>()?; -// Implement bi-directional `From` between `DataChunk` and `arrow_array::RecordBatch`. + let opts = arrow_array::RecordBatchOptions::default().with_row_count(Some(chunk.capacity())); + arrow_array::RecordBatch::try_new_with_options(schema, columns, &opts) + .map_err(|err| ArrayError::ToArrow(err.to_string())) +} +// Implement bi-directional `From` between `DataChunk` and `arrow_array::RecordBatch`. 
impl TryFrom<&DataChunk> for arrow_array::RecordBatch { type Error = ArrayError; fn try_from(chunk: &DataChunk) -> Result { + if !chunk.is_compacted() { + let c = chunk.clone(); + return Self::try_from(&c.compact()); + } let columns: Vec<_> = chunk .columns() .iter() @@ -47,8 +81,9 @@ impl TryFrom<&DataChunk> for arrow_array::RecordBatch { .collect(); let schema = Arc::new(Schema::new(fields)); - - arrow_array::RecordBatch::try_new(schema, columns) + let opts = + arrow_array::RecordBatchOptions::default().with_row_count(Some(chunk.capacity())); + arrow_array::RecordBatch::try_new_with_options(schema, columns, &opts) .map_err(|err| ArrayError::ToArrow(err.to_string())) } } diff --git a/src/common/src/array/bool_array.rs b/src/common/src/array/bool_array.rs index 9835ee8a5865c..fb12bb819fffd 100644 --- a/src/common/src/array/bool_array.rs +++ b/src/common/src/array/bool_array.rs @@ -30,6 +30,15 @@ impl BoolArray { Self { bitmap, data } } + /// Build a [`BoolArray`] from iterator and bitmap. + /// + /// NOTE: The length of `bitmap` must be equal to the length of `iter`. + pub fn from_iter_bitmap(iter: impl IntoIterator, bitmap: Bitmap) -> Self { + let data: Bitmap = iter.into_iter().collect(); + assert_eq!(data.len(), bitmap.len()); + BoolArray { bitmap, data } + } + pub fn data(&self) -> &Bitmap { &self.data } diff --git a/src/common/src/array/compact_chunk.rs b/src/common/src/array/compact_chunk.rs new file mode 100644 index 0000000000000..2b93e28a0be7d --- /dev/null +++ b/src/common/src/array/compact_chunk.rs @@ -0,0 +1,216 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::hash::BuildHasherDefault; +use std::mem; + +use itertools::Itertools; +use prehash::{new_prehashed_map_with_capacity, Passthru, Prehashed}; + +use super::stream_chunk::{OpRowMutRef, StreamChunkMut}; +use crate::array::{Op, RowRef, StreamChunk}; +use crate::row::{Project, RowExt}; +use crate::util::hash_util::Crc32FastBuilder; + +/// Compact the stream chunks with just modify the `Ops` and visibility of the chunk. Currently, two +/// transformation will be applied +/// - remove intermediate operation of the same key. The operations of the same stream key will only +/// have three kind of patterns Insert, Delete or Update. +/// - For the update (-old row, +old row), when old row is exactly same. The two rowOp will be +/// removed. 
+pub struct StreamChunkCompactor { + chunks: Vec, + stream_key: Vec, +} + +struct OpRowMutRefTuple<'a> { + previous: Option>, + latest: OpRowMutRef<'a>, +} + +impl<'a> OpRowMutRefTuple<'a> { + /// return true if no row left + fn push(&mut self, mut op_row: OpRowMutRef<'a>) -> bool { + debug_assert!(self.latest.vis()); + match (self.latest.op(), op_row.op()) { + (Op::Insert, Op::Insert) => panic!("receive duplicated insert on the stream"), + (Op::Delete, Op::Delete) => panic!("receive duplicated delete on the stream"), + (Op::Insert, Op::Delete) => { + self.latest.set_vis(false); + op_row.set_vis(false); + self.latest = if let Some(prev) = self.previous.take() { + prev + } else { + return true; + } + } + (Op::Delete, Op::Insert) => { + // The operation for the key must be (+, -, +) or (-, +). And the (+, -) must has + // been filtered. + debug_assert!(self.previous.is_none()); + self.previous = Some(mem::replace(&mut self.latest, op_row)); + } + // `all the updateDelete` and `updateInsert` should be normalized to `delete` + // and`insert` + _ => unreachable!(), + }; + false + } + + fn as_update_op(&mut self) -> Option<(&mut OpRowMutRef<'a>, &mut OpRowMutRef<'a>)> { + self.previous.as_mut().map(|prev| { + debug_assert_eq!(prev.op(), Op::Delete); + debug_assert_eq!(self.latest.op(), Op::Insert); + (prev, &mut self.latest) + }) + } +} + +type OpRowMap<'a, 'b> = + HashMap>>, OpRowMutRefTuple<'a>, BuildHasherDefault>; + +impl StreamChunkCompactor { + pub fn new(stream_key: Vec) -> Self { + Self { + stream_key, + chunks: vec![], + } + } + + pub fn into_inner(self) -> (Vec, Vec) { + (self.chunks, self.stream_key) + } + + pub fn push_chunk(&mut self, c: StreamChunk) { + self.chunks.push(c); + } + + /// Compact a chunk by modifying the ops and the visibility of a stream chunk. All UPDATE INSERT + /// and UPDATE DELETE will be converted to INSERT and DELETE, and dropped according to + /// certain rules (see `merge_insert` and `merge_delete` for more details). 
+ pub fn into_compacted_chunks(self) -> impl Iterator { + let (chunks, key_indices) = self.into_inner(); + + let estimate_size = chunks.iter().map(|c| c.cardinality()).sum(); + let mut chunks: Vec<(Vec, StreamChunkMut)> = chunks + .into_iter() + .map(|c| { + let hash_values = c + .data_chunk() + .get_hash_values(&key_indices, Crc32FastBuilder) + .into_iter() + .map(|hash| hash.value()) + .collect_vec(); + (hash_values, StreamChunkMut::from(c)) + }) + .collect_vec(); + + let mut op_row_map: OpRowMap<'_, '_> = new_prehashed_map_with_capacity(estimate_size); + for (hash_values, c) in &mut chunks { + for (row, mut op_row) in c.to_rows_mut() { + op_row.set_op(op_row.op().normalize_update()); + let hash = hash_values[row.index()]; + let stream_key = row.project(&key_indices); + match op_row_map.entry(Prehashed::new(stream_key, hash)) { + Entry::Vacant(v) => { + v.insert(OpRowMutRefTuple { + previous: None, + latest: op_row, + }); + } + Entry::Occupied(mut o) => { + if o.get_mut().push(op_row) { + o.remove_entry(); + } + } + } + } + } + for tuple in op_row_map.values_mut() { + if let Some((prev, latest)) = tuple.as_update_op() { + if prev.row_ref() == latest.row_ref() { + prev.set_vis(false); + latest.set_vis(false); + } else if prev.same_chunk(latest) && prev.index() + 1 == latest.index() { + // TODO(st1page): use next_one check in bitmap + prev.set_op(Op::UpdateDelete); + latest.set_op(Op::UpdateInsert); + } + } + } + chunks.into_iter().map(|(_, c)| c.into()) + } +} + +pub fn merge_chunk_row(stream_chunk: StreamChunk, pk_indices: &[usize]) -> StreamChunk { + let mut compactor = StreamChunkCompactor::new(pk_indices.to_vec()); + compactor.push_chunk(stream_chunk); + compactor.into_compacted_chunks().next().unwrap() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::array::StreamChunk; + use crate::test_prelude::StreamChunkTestExt; + + #[test] + fn test_merge_chunk_row() { + let pk_indices = [0, 1]; + let mut compactor = 
StreamChunkCompactor::new(pk_indices.to_vec()); + compactor.push_chunk(StreamChunk::from_pretty( + " I I I + - 1 1 1 + + 1 1 2 + + 2 5 7 + + 4 9 2 + - 2 5 7 + + 2 5 5 + - 6 6 9 + + 6 6 9 + - 9 9 1", + )); + compactor.push_chunk(StreamChunk::from_pretty( + " I I I + - 6 6 9 + + 9 9 9 + - 9 9 4 + + 2 2 2 + + 9 9 1", + )); + let mut iter = compactor.into_compacted_chunks(); + assert_eq!( + iter.next().unwrap().compact(), + StreamChunk::from_pretty( + " I I I + U- 1 1 1 + U+ 1 1 2 + + 4 9 2 + + 2 5 5 + - 6 6 9", + ) + ); + assert_eq!( + iter.next().unwrap().compact(), + StreamChunk::from_pretty( + " I I I + + 2 2 2", + ) + ); + + assert_eq!(iter.next(), None); + } +} diff --git a/src/common/src/array/data_chunk.rs b/src/common/src/array/data_chunk.rs index f335b56a60edb..98a237814176d 100644 --- a/src/common/src/array/data_chunk.rs +++ b/src/common/src/array/data_chunk.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::borrow::Cow; use std::fmt::Display; use std::hash::BuildHasher; use std::sync::Arc; @@ -24,7 +25,7 @@ use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; use risingwave_pb::data::PbDataChunk; -use super::{Array, ArrayImpl, ArrayRef, ArrayResult, StructArray, Vis}; +use super::{Array, ArrayImpl, ArrayRef, ArrayResult, StructArray}; use crate::array::data_chunk_iter::RowRef; use crate::array::ArrayBuilderImpl; use crate::buffer::{Bitmap, BitmapBuilder}; @@ -63,24 +64,25 @@ use crate::util::value_encoding::{ #[must_use] pub struct DataChunk { columns: Arc<[ArrayRef]>, - vis2: Vis, + visibility: Bitmap, } impl DataChunk { pub(crate) const PRETTY_TABLE_PRESET: &'static str = "||--+-++| ++++++"; - /// Create a `DataChunk` with `columns` and visibility. The visibility can either be a `Bitmap` - /// or a simple cardinality number. 
- pub fn new>(columns: Vec, vis: V) -> Self { - let vis: Vis = vis.into(); - let capacity = vis.len(); + /// Create a `DataChunk` with `columns` and visibility. + /// + /// The visibility can either be a `Bitmap` or a simple cardinality number. + pub fn new(columns: Vec, visibility: impl Into) -> Self { + let visibility = visibility.into(); + let capacity = visibility.len(); for column in &columns { assert_eq!(capacity, column.len()); } DataChunk { columns: columns.into(), - vis2: vis, + visibility, } } @@ -88,7 +90,7 @@ impl DataChunk { pub fn new_dummy(cardinality: usize) -> Self { DataChunk { columns: Arc::new([]), - vis2: Vis::Compact(cardinality), + visibility: Bitmap::ones(cardinality), } } @@ -114,28 +116,22 @@ impl DataChunk { /// Return the next visible row index on or after `row_idx`. pub fn next_visible_row_idx(&self, row_idx: usize) -> Option { - match &self.vis2 { - Vis::Bitmap(vis) => vis.next_set_bit(row_idx), - Vis::Compact(cardinality) => { - if row_idx < *cardinality { - Some(row_idx) - } else { - None - } - } - } + self.visibility.next_set_bit(row_idx) } - pub fn into_parts(self) -> (Vec, Vis) { - (self.columns.to_vec(), self.vis2) + pub fn into_parts(self) -> (Vec, Bitmap) { + (self.columns.to_vec(), self.visibility) } - pub fn into_parts_v2(self) -> (Arc<[ArrayRef]>, Vis) { - (self.columns, self.vis2) + pub fn into_parts_v2(self) -> (Arc<[ArrayRef]>, Bitmap) { + (self.columns, self.visibility) } - pub fn from_parts(columns: Arc<[ArrayRef]>, vis2: Vis) -> Self { - Self { columns, vis2 } + pub fn from_parts(columns: Arc<[ArrayRef]>, visibilities: Bitmap) -> Self { + Self { + columns, + visibility: visibilities, + } } pub fn dimension(&self) -> usize { @@ -144,53 +140,42 @@ impl DataChunk { /// `cardinality` returns the number of visible tuples pub fn cardinality(&self) -> usize { - match &self.vis2 { - Vis::Bitmap(b) => b.count_ones(), - Vis::Compact(len) => *len, - } + self.visibility.count_ones() } /// `capacity` returns physical length of any 
chunk column pub fn capacity(&self) -> usize { - self.vis2.len() - } - - pub fn vis(&self) -> &Vis { - &self.vis2 + self.visibility.len() } pub fn selectivity(&self) -> f64 { - match &self.vis2 { - Vis::Bitmap(b) => { - if b.is_empty() { - 0.0 - } else { - b.count_ones() as f64 / b.len() as f64 - } - } - Vis::Compact(_) => 1.0, + if self.visibility.is_empty() { + 0.0 + } else if self.visibility.all() { + 1.0 + } else { + self.visibility.count_ones() as f64 / self.visibility.len() as f64 } } - pub fn with_visibility(&self, visibility: impl Into) -> Self { + pub fn with_visibility(&self, visibility: impl Into) -> Self { DataChunk { columns: self.columns.clone(), - vis2: visibility.into(), + visibility: visibility.into(), } } - pub fn visibility(&self) -> Option<&Bitmap> { - self.vis2.as_visibility() - } - - pub fn set_vis(&mut self, vis: Vis) { - assert_eq!(vis.len(), self.capacity()); - self.vis2 = vis; + pub fn visibility(&self) -> &Bitmap { + &self.visibility } pub fn set_visibility(&mut self, visibility: Bitmap) { assert_eq!(visibility.len(), self.capacity()); - self.vis2 = Vis::Bitmap(visibility); + self.visibility = visibility; + } + + pub fn is_compacted(&self) -> bool { + self.visibility.all() } pub fn column_at(&self, idx: usize) -> &ArrayRef { @@ -213,16 +198,13 @@ impl DataChunk { /// Panics if `idx > columns.len()`. 
pub fn split_column_at(&self, idx: usize) -> (Self, Self) { let (left, right) = self.columns.split_at(idx); - let left = DataChunk::new(left.to_vec(), self.vis2.clone()); - let right = DataChunk::new(right.to_vec(), self.vis2.clone()); + let left = DataChunk::new(left.to_vec(), self.visibility.clone()); + let right = DataChunk::new(right.to_vec(), self.visibility.clone()); (left, right) } pub fn to_protobuf(&self) -> PbDataChunk { - assert!( - matches!(self.vis2, Vis::Compact(_)), - "must be compacted before transfer" - ); + assert!(self.visibility.all(), "must be compacted before transfer"); let mut proto = PbDataChunk { cardinality: self.cardinality() as u32, columns: Default::default(), @@ -248,21 +230,82 @@ impl DataChunk { /// The main benefit is that the data chunk is smaller, taking up less memory. /// We can also save the cost of iterating over many hidden rows. pub fn compact(self) -> Self { - match &self.vis2 { - Vis::Compact(_) => self, - Vis::Bitmap(visibility) => { - let cardinality = visibility.count_ones(); - let columns = self - .columns - .iter() - .map(|col| { - let array = col; - array.compact(visibility, cardinality).into() - }) - .collect::>(); - Self::new(columns, cardinality) + if self.visibility.all() { + return self; + } + let cardinality = self.visibility.count_ones(); + let columns = self + .columns + .iter() + .map(|col| { + let array = col; + array.compact(&self.visibility, cardinality).into() + }) + .collect::>(); + Self::new(columns, Bitmap::ones(cardinality)) + } + + pub fn uncompact(self, vis: Bitmap) -> Self { + let mut uncompact_builders: Vec<_> = self + .columns + .iter() + .map(|c| c.create_builder(vis.len())) + .collect(); + let mut last_u = None; + + for (idx, u) in vis.iter_ones().enumerate() { + // pad invisible rows with NULL + let zeros = if let Some(last_u) = last_u { + u - last_u - 1 + } else { + u + }; + for _ in 0..zeros { + uncompact_builders + .iter_mut() + .for_each(|builder| builder.append_null()); } + 
uncompact_builders + .iter_mut() + .zip_eq_fast(self.columns.iter()) + .for_each(|(builder, c)| builder.append(c.datum_at(idx))); + last_u = Some(u); } + let zeros = if let Some(last_u) = last_u { + vis.len() - last_u - 1 + } else { + vis.len() + }; + for _ in 0..zeros { + uncompact_builders + .iter_mut() + .for_each(|builder| builder.append_null()); + } + let array: Vec<_> = uncompact_builders + .into_iter() + .map(|builder| Arc::new(builder.finish())) + .collect(); + + Self::new(array, vis) + } + + /// Convert the chunk to compact format. + /// + /// If the chunk is not compacted, return a new compacted chunk, otherwise return a reference to self. + pub fn compact_cow(&self) -> Cow<'_, Self> { + if self.visibility.all() { + return Cow::Borrowed(self); + } + let cardinality = self.visibility.count_ones(); + let columns = self + .columns + .iter() + .map(|col| { + let array = col; + array.compact(&self.visibility, cardinality).into() + }) + .collect::>(); + Cow::Owned(Self::new(columns, Bitmap::ones(cardinality))) } pub fn from_protobuf(proto: &PbDataChunk) -> ArrayResult { @@ -384,7 +427,7 @@ impl DataChunk { /// * bool - whether this tuple is visible pub fn row_at(&self, pos: usize) -> (RowRef<'_>, bool) { let row = self.row_at_unchecked_vis(pos); - let vis = self.vis2.is_set(pos); + let vis = self.visibility.is_set(pos); (row, vis) } @@ -447,7 +490,7 @@ impl DataChunk { .collect(); DataChunk { columns, - vis2: self.vis2.clone(), + visibility: self.visibility.clone(), } } @@ -459,16 +502,16 @@ impl DataChunk { pub fn project(&self, indices: &[usize]) -> Self { Self { columns: indices.iter().map(|i| self.columns[*i].clone()).collect(), - vis2: self.vis2.clone(), + visibility: self.visibility.clone(), } } /// Reorder columns and set visibility. 
- pub fn project_with_vis(&self, indices: &[usize], vis: Vis) -> Self { - assert_eq!(vis.len(), self.capacity()); + pub fn project_with_vis(&self, indices: &[usize], visibility: Bitmap) -> Self { + assert_eq!(visibility.len(), self.capacity()); Self { columns: indices.iter().map(|i| self.columns[*i].clone()).collect(), - vis2: vis, + visibility, } } @@ -539,57 +582,54 @@ impl DataChunk { // Note(bugen): should we exclude the invisible rows in the output so that the caller won't need // to handle visibility again? pub fn serialize(&self) -> Vec { - let buffers = match &self.vis2 { - Vis::Bitmap(vis) => { - let rows_num = vis.len(); - let mut buffers: Vec> = vec![]; - let (row_len_fixed, col_variable) = self.partition_sizes(); - - // First initialize buffer with the right size to avoid re-allocations - for i in 0..rows_num { - // SAFETY(value_at_unchecked): the idx is always in bound. - unsafe { - if vis.is_set_unchecked(i) { - buffers.push(Self::init_buffer(row_len_fixed, &col_variable, i)); - } else { - buffers.push(vec![]); - } + let buffers = if !self.visibility.all() { + let rows_num = self.visibility.len(); + let mut buffers: Vec> = vec![]; + let (row_len_fixed, col_variable) = self.partition_sizes(); + + // First initialize buffer with the right size to avoid re-allocations + for i in 0..rows_num { + // SAFETY(value_at_unchecked): the idx is always in bound. + unsafe { + if self.visibility.is_set_unchecked(i) { + buffers.push(Self::init_buffer(row_len_fixed, &col_variable, i)); + } else { + buffers.push(vec![]); } } + } - // Then do the actual serialization - for c in &*self.columns { - assert_eq!(c.len(), rows_num); - for (i, buffer) in buffers.iter_mut().enumerate() { - // SAFETY(value_at_unchecked): the idx is always in bound. 
- unsafe { - if vis.is_set_unchecked(i) { - serialize_datum_into(c.value_at_unchecked(i), buffer); - } + // Then do the actual serialization + for c in &*self.columns { + assert_eq!(c.len(), rows_num); + for (i, buffer) in buffers.iter_mut().enumerate() { + // SAFETY(value_at_unchecked): the idx is always in bound. + unsafe { + if self.visibility.is_set_unchecked(i) { + serialize_datum_into(c.value_at_unchecked(i), buffer); } } } - buffers } - Vis::Compact(rows_num) => { - let mut buffers: Vec> = vec![]; - let (row_len_fixed, col_variable) = self.partition_sizes(); - for i in 0..*rows_num { - unsafe { - buffers.push(Self::init_buffer(row_len_fixed, &col_variable, i)); - } + buffers + } else { + let mut buffers: Vec> = vec![]; + let (row_len_fixed, col_variable) = self.partition_sizes(); + for i in 0..self.visibility.len() { + unsafe { + buffers.push(Self::init_buffer(row_len_fixed, &col_variable, i)); } - for c in &*self.columns { - assert_eq!(c.len(), *rows_num); - for (i, buffer) in buffers.iter_mut().enumerate() { - // SAFETY(value_at_unchecked): the idx is always in bound. - unsafe { - serialize_datum_into(c.value_at_unchecked(i), buffer); - } + } + for c in &*self.columns { + assert_eq!(c.len(), self.visibility.len()); + for (i, buffer) in buffers.iter_mut().enumerate() { + // SAFETY(value_at_unchecked): the idx is always in bound. 
+ unsafe { + serialize_datum_into(c.value_at_unchecked(i), buffer); } } - buffers } + buffers }; buffers.into_iter().map(|item| item.into()).collect_vec() @@ -644,7 +684,7 @@ impl<'a> From<&'a StructArray> for DataChunk { fn from(array: &'a StructArray) -> Self { Self { columns: array.fields().cloned().collect(), - vis2: Vis::Compact(array.len()), + visibility: Bitmap::ones(array.len()), } } } @@ -655,7 +695,7 @@ impl EstimateSize for DataChunk { .iter() .map(|a| a.estimated_heap_size()) .sum::() - + self.vis2.estimated_heap_size() + + self.visibility.estimated_heap_size() } } @@ -779,6 +819,7 @@ impl DataChunkTestExt for DataChunk { "." => None, "t" => Some(true.into()), "f" => Some(false.into()), + "(empty)" => Some("".into()), _ => Some(ScalarImpl::from_text(val_str.as_bytes(), ty).unwrap()), }; builder.append(datum); @@ -794,11 +835,7 @@ impl DataChunkTestExt for DataChunk { .into_iter() .map(|builder| builder.finish().into()) .collect(); - let vis = if visibility.iter().all(|b| *b) { - Vis::Compact(visibility.len()) - } else { - Vis::Bitmap(Bitmap::from_iter(visibility)) - }; + let vis = Bitmap::from_iter(visibility); let chunk = DataChunk::new(columns, vis); chunk.assert_valid(); chunk @@ -828,14 +865,14 @@ impl DataChunkTestExt for DataChunk { builder.finish().into() }) .collect(); - let chunk = DataChunk::new(new_cols, Vis::Bitmap(new_vis.finish())); + let chunk = DataChunk::new(new_cols, new_vis.finish()); chunk.assert_valid(); chunk } fn assert_valid(&self) { let cols = self.columns(); - let vis = &self.vis2; + let vis = &self.visibility; let n = vis.len(); for col in cols { assert_eq!(col.len(), n); diff --git a/src/common/src/array/data_chunk_iter.rs b/src/common/src/array/data_chunk_iter.rs index 96e493e796fc1..887a9ab02437e 100644 --- a/src/common/src/array/data_chunk_iter.rs +++ b/src/common/src/array/data_chunk_iter.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::hash::Hash; use std::iter::{FusedIterator, TrustedLen}; use std::ops::Range; @@ -58,10 +59,7 @@ impl<'a> Iterator for DataChunkRefIter<'a> { match self.chunk.next_visible_row_idx(self.idx.start) { Some(idx) if idx < self.idx.end => { self.idx.start = idx + 1; - Some(RowRef { - chunk: self.chunk, - idx, - }) + Some(RowRef::new(self.chunk, idx)) } _ => { self.idx.start = self.idx.end; @@ -99,17 +97,14 @@ impl<'a> Iterator for DataChunkRefIterWithHoles<'a> { fn next(&mut self) -> Option { let len = self.chunk.capacity(); - let vis = self.chunk.vis(); + let vis = self.chunk.visibility(); if self.idx == len { None } else { let ret = Some(if !vis.is_set(self.idx) { None } else { - Some(RowRef { - chunk: self.chunk, - idx: self.idx, - }) + Some(RowRef::new(self.chunk, self.idx)) }); self.idx += 1; ret @@ -125,90 +120,160 @@ impl<'a> Iterator for DataChunkRefIterWithHoles<'a> { impl ExactSizeIterator for DataChunkRefIterWithHoles<'_> {} unsafe impl TrustedLen for DataChunkRefIterWithHoles<'_> {} -#[derive(Clone, Copy)] -pub struct RowRef<'a> { - chunk: &'a DataChunk, +// Deliberately making `RowRef` and `RowRefIter` defined in a private module to ensure +// the checks in the constructors are always performed. +mod row_ref { + use super::*; - idx: usize, -} + #[derive(Clone, Copy)] + pub struct RowRef<'a> { + columns: &'a [ArrayRef], -impl<'a> std::fmt::Debug for RowRef<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_list().entries(self.iter()).finish() + idx: usize, } -} -impl<'a> RowRef<'a> { - pub fn new(chunk: &'a DataChunk, idx: usize) -> Self { - debug_assert!(idx < chunk.capacity()); - Self { chunk, idx } + impl<'a> std::fmt::Debug for RowRef<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_list().entries(self.iter()).finish() + } } - /// Get the index of this row in the data chunk. 
- #[must_use] - pub fn index(&self) -> usize { - self.idx + impl<'a> RowRef<'a> { + pub fn new(chunk: &'a DataChunk, idx: usize) -> Self { + assert!( + idx < chunk.capacity(), + "index {idx} out of bound {}", + chunk.capacity() + ); + + Self { + columns: chunk.columns(), + idx, + } + } + + pub fn with_columns(columns: &'a [ArrayRef], idx: usize) -> Self { + for column in columns { + assert!( + idx < column.len(), + "index {idx} out of bound {}", + column.len() + ); + } + + Self { columns, idx } + } + + /// Get the index of this row in the data chunk. + #[must_use] + pub fn index(&self) -> usize { + self.idx + } } -} -impl PartialEq for RowRef<'_> { - fn eq(&self, other: &Self) -> bool { - self.iter().eq(other.iter()) + impl PartialEq for RowRef<'_> { + fn eq(&self, other: &Self) -> bool { + self.iter().eq(other.iter()) + } } -} -impl Eq for RowRef<'_> {} + impl Eq for RowRef<'_> {} -impl Row for RowRef<'_> { - fn datum_at(&self, index: usize) -> DatumRef<'_> { - debug_assert!(self.idx < self.chunk.capacity()); - // for `RowRef`, the index is always in bound. - unsafe { self.chunk.columns()[index].value_at_unchecked(self.idx) } + impl Hash for RowRef<'_> { + fn hash(&self, state: &mut H) { + self.hash_datums_into(state) + } } - unsafe fn datum_at_unchecked(&self, index: usize) -> DatumRef<'_> { - debug_assert!(self.idx < self.chunk.capacity()); - // for `RowRef`, the index is always in bound. - self.chunk - .columns() - .get_unchecked(index) - .value_at_unchecked(self.idx) + impl Row for RowRef<'_> { + fn datum_at(&self, index: usize) -> DatumRef<'_> { + // SAFETY: `self.idx` is already checked in `new` or `with_columns`. 
+ unsafe { self.columns[index].value_at_unchecked(self.idx) } + } + + unsafe fn datum_at_unchecked(&self, index: usize) -> DatumRef<'_> { + self.columns + .get_unchecked(index) + .value_at_unchecked(self.idx) + } + + fn len(&self) -> usize { + self.columns.len() + } + + fn iter(&self) -> impl ExactSizeIterator> { + RowRefIter { + columns: self.columns.iter(), + row_idx: self.idx, + } + } } - fn len(&self) -> usize { - self.chunk.columns().len() + #[derive(Clone)] + pub struct RowRefIter<'a> { + columns: std::slice::Iter<'a, ArrayRef>, + row_idx: usize, } - fn iter(&self) -> impl ExactSizeIterator> { - debug_assert!(self.idx < self.chunk.capacity()); - RowRefIter { - columns: self.chunk.columns().iter(), - row_idx: self.idx, + impl<'a> Iterator for RowRefIter<'a> { + type Item = DatumRef<'a>; + + fn next(&mut self) -> Option { + // SAFETY: `self.row_idx` is already checked in `new` or `with_columns` of `RowRef`. + unsafe { + self.columns + .next() + .map(|col| col.value_at_unchecked(self.row_idx)) + } + } + + fn size_hint(&self) -> (usize, Option) { + self.columns.size_hint() } } -} -#[derive(Clone)] -pub struct RowRefIter<'a> { - columns: std::slice::Iter<'a, ArrayRef>, - row_idx: usize, + impl ExactSizeIterator for RowRefIter<'_> {} + unsafe impl TrustedLen for RowRefIter<'_> {} } -impl<'a> Iterator for RowRefIter<'a> { - type Item = DatumRef<'a>; +pub use row_ref::{RowRef, RowRefIter}; - fn next(&mut self) -> Option { - // SAFETY: for `RowRef`, the index is always in bound. 
- unsafe { - self.columns - .next() - .map(|col| col.value_at_unchecked(self.row_idx)) +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use crate::array::StreamChunk; + use crate::test_prelude::StreamChunkTestExt; + + #[test] + fn test_row_ref_hash() { + let mut set = HashSet::new(); + let chunk1 = StreamChunk::from_pretty( + " I I I + + 2 5 1 + + 4 9 2 + - 2 5 1", + ); + for (_, row) in chunk1.rows() { + set.insert(row); } - } + assert_eq!(set.len(), 2); - fn size_hint(&self) -> (usize, Option) { - self.columns.size_hint() + let chunk2 = StreamChunk::from_pretty( + " I I I + - 4 9 2", + ); + for (_, row) in chunk2.rows() { + set.insert(row); + } + assert_eq!(set.len(), 2); + + let chunk3 = StreamChunk::from_pretty( + " I I I + + 1 2 3", + ); + for (_, row) in chunk3.rows() { + set.insert(row); + } + assert_eq!(set.len(), 3); } } - -impl ExactSizeIterator for RowRefIter<'_> {} -unsafe impl TrustedLen for RowRefIter<'_> {} diff --git a/src/common/src/array/list_array.rs b/src/common/src/array/list_array.rs index 2c4a8cf042548..7eaaffff98534 100644 --- a/src/common/src/array/list_array.rs +++ b/src/common/src/array/list_array.rs @@ -12,12 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use core::fmt; use std::cmp::Ordering; +use std::fmt; use std::fmt::Debug; use std::future::Future; use std::hash::Hash; use std::mem::size_of; +use std::ops::{Index, IndexMut}; use bytes::{Buf, BufMut}; use either::Either; @@ -359,6 +360,20 @@ impl Ord for ListValue { } } +impl Index for ListValue { + type Output = Datum; + + fn index(&self, index: usize) -> &Self::Output { + &self.values[index] + } +} + +impl IndexMut for ListValue { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.values[index] + } +} + // Used to display ListValue in explain for better readibilty. 
pub fn display_for_explain(list: &ListValue) -> String { // Example of ListValue display: ARRAY[1, 2, null] @@ -485,7 +500,7 @@ impl<'a> ListRef<'a> { } /// Get the element at the given index. Returns `None` if the index is out of bounds. - pub fn elem_at(self, index: usize) -> Option> { + pub fn get(self, index: usize) -> Option> { iter_elems_ref!(self, it, { let mut it = it; it.nth(index) @@ -551,12 +566,9 @@ impl Ord for ListRef<'_> { impl Debug for ListRef<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - iter_elems_ref!(*self, it, { - for v in it { - Debug::fmt(&v, f)?; - } - Ok(()) - }) + let mut f = f.debug_list(); + iter_elems_ref!(*self, it, { f.entries(it) }); + f.finish() } } @@ -606,6 +618,12 @@ impl ToText for ListRef<'_> { } } +impl<'a> From<&'a ListValue> for ListRef<'a> { + fn from(val: &'a ListValue) -> Self { + ListRef::ValueRef { val } + } +} + #[cfg(test)] mod tests { use more_asserts::{assert_gt, assert_lt}; @@ -1014,7 +1032,7 @@ mod tests { ); // Get 2nd value from ListRef - let scalar = list_ref.elem_at(1).unwrap(); + let scalar = list_ref.get(1).unwrap(); assert_eq!(scalar, Some(types::ScalarRefImpl::Int32(5))); } } diff --git a/src/common/src/array/mod.rs b/src/common/src/array/mod.rs index ba9b0bff0f4f5..2a505cddfc5c5 100644 --- a/src/common/src/array/mod.rs +++ b/src/common/src/array/mod.rs @@ -15,9 +15,11 @@ //! `Array` defines all in-memory representations of vectorized execution framework. 
mod arrow; +pub use arrow::to_record_batch_with_schema; mod bool_array; pub mod bytes_array; mod chrono_array; +pub mod compact_chunk; mod data_chunk; pub mod data_chunk_iter; mod decimal_array; @@ -35,7 +37,6 @@ pub mod stream_record; pub mod struct_array; mod utf8_array; mod value_reader; -mod vis; use std::convert::From; use std::hash::{Hash, Hasher}; @@ -47,6 +48,7 @@ pub use chrono_array::{ DateArray, DateArrayBuilder, TimeArray, TimeArrayBuilder, TimestampArray, TimestampArrayBuilder, TimestamptzArray, TimestamptzArrayBuilder, }; +pub use compact_chunk::*; pub use data_chunk::{DataChunk, DataChunkTestExt}; pub use data_chunk_iter::RowRef; pub use decimal_array::{DecimalArray, DecimalArrayBuilder}; @@ -61,7 +63,6 @@ use risingwave_pb::data::PbArray; pub use stream_chunk::{Op, StreamChunk, StreamChunkTestExt}; pub use struct_array::{StructArray, StructArrayBuilder, StructRef, StructValue}; pub use utf8_array::*; -pub use vis::{Vis, VisRef}; pub use self::error::ArrayError; pub use crate::array::num256_array::{Int256Array, Int256ArrayBuilder}; diff --git a/src/common/src/array/num256_array.rs b/src/common/src/array/num256_array.rs index 65b7daf784979..9845ead46ba05 100644 --- a/src/common/src/array/num256_array.rs +++ b/src/common/src/array/num256_array.rs @@ -209,3 +209,13 @@ impl EstimateSize for Int256Array { self.bitmap.estimated_heap_size() + self.data.capacity() * size_of::() } } + +impl FromIterator for Int256Array { + fn from_iter>(iter: I) -> Self { + let data: Vec = iter.into_iter().map(|i| *i.0).collect(); + Int256Array { + bitmap: Bitmap::ones(data.len()), + data, + } + } +} diff --git a/src/common/src/array/stream_chunk.rs b/src/common/src/array/stream_chunk.rs index e280af1257c23..b1d299caaf6e4 100644 --- a/src/common/src/array/stream_chunk.rs +++ b/src/common/src/array/stream_chunk.rs @@ -24,10 +24,9 @@ use rand::prelude::SmallRng; use rand::{Rng, SeedableRng}; use risingwave_pb::data::{PbOp, PbStreamChunk}; -use super::vis::VisMut; -use 
super::{ArrayImpl, ArrayRef, ArrayResult, DataChunkTestExt}; -use crate::array::{DataChunk, Vis}; -use crate::buffer::Bitmap; +use super::{ArrayImpl, ArrayRef, ArrayResult, DataChunkTestExt, RowRef}; +use crate::array::DataChunk; +use crate::buffer::{Bitmap, BitmapBuilder}; use crate::catalog::Schema; use crate::estimate_size::EstimateSize; use crate::field_generator::VarcharProperty; @@ -59,16 +58,26 @@ impl Op { } pub fn from_protobuf(prost: &i32) -> ArrayResult { - let op = match PbOp::from_i32(*prost) { - Some(PbOp::Insert) => Op::Insert, - Some(PbOp::Delete) => Op::Delete, - Some(PbOp::UpdateInsert) => Op::UpdateInsert, - Some(PbOp::UpdateDelete) => Op::UpdateDelete, - Some(PbOp::Unspecified) => unreachable!(), - None => bail!("No such op type"), + let op = match PbOp::try_from(*prost) { + Ok(PbOp::Insert) => Op::Insert, + Ok(PbOp::Delete) => Op::Delete, + Ok(PbOp::UpdateInsert) => Op::UpdateInsert, + Ok(PbOp::UpdateDelete) => Op::UpdateDelete, + Ok(PbOp::Unspecified) => unreachable!(), + Err(_) => bail!("No such op type"), }; Ok(op) } + + /// convert `UpdateDelete` to `Delete` and `UpdateInsert` to Insert + pub fn normalize_update(self) -> Op { + match self { + Op::Insert => Op::Insert, + Op::Delete => Op::Delete, + Op::UpdateDelete => Op::Delete, + Op::UpdateInsert => Op::Insert, + } + } } pub type Ops<'a> = &'a [Op]; @@ -94,21 +103,24 @@ impl Default for StreamChunk { } impl StreamChunk { - pub fn new( + /// Create a new `StreamChunk` with given ops and columns. + pub fn new(ops: impl Into>, columns: Vec) -> Self { + let ops = ops.into(); + let visibility = Bitmap::ones(ops.len()); + Self::with_visibility(ops, columns, visibility) + } + + /// Create a new `StreamChunk` with given ops, columns and visibility. 
+ pub fn with_visibility( ops: impl Into>, columns: Vec, - visibility: Option, + visibility: Bitmap, ) -> Self { let ops = ops.into(); for col in &columns { assert_eq!(col.len(), ops.len()); } - - let vis = match visibility { - Some(b) => Vis::Bitmap(b), - None => Vis::Compact(ops.len()), - }; - let data = DataChunk::new(columns, vis); + let data = DataChunk::new(columns, visibility); StreamChunk { ops, data } } @@ -132,7 +144,7 @@ impl StreamChunk { .into_iter() .map(|builder| builder.finish().into()) .collect::>(); - StreamChunk::new(ops, new_columns, None) + StreamChunk::new(ops, new_columns) } /// Get the reference of the underlying data chunk. @@ -142,25 +154,24 @@ impl StreamChunk { /// compact the `StreamChunk` with its visibility map pub fn compact(self) -> Self { - if self.visibility().is_none() { + if self.is_compacted() { return self; } let (ops, columns, visibility) = self.into_inner(); - let visibility = visibility.as_visibility().unwrap(); let cardinality = visibility .iter() .fold(0, |vis_cnt, vis| vis_cnt + vis as usize); let columns: Vec<_> = columns .into_iter() - .map(|col| col.compact(visibility, cardinality).into()) + .map(|col| col.compact(&visibility, cardinality).into()) .collect(); let mut new_ops = Vec::with_capacity(cardinality); for idx in visibility.iter_ones() { new_ops.push(ops[idx]); } - StreamChunk::new(new_ops, columns, None) + StreamChunk::new(new_ops, columns) } pub fn into_parts(self) -> (DataChunk, Arc<[Op]>) { @@ -169,15 +180,18 @@ impl StreamChunk { pub fn from_parts(ops: impl Into>, data_chunk: DataChunk) -> Self { let (columns, vis) = data_chunk.into_parts(); - Self::new(ops, columns, vis.into_visibility()) + Self::with_visibility(ops, columns, vis) } - pub fn into_inner(self) -> (Arc<[Op]>, Vec, Vis) { + pub fn into_inner(self) -> (Arc<[Op]>, Vec, Bitmap) { let (columns, vis) = self.data.into_parts(); (self.ops, columns, vis) } pub fn to_protobuf(&self) -> PbStreamChunk { + if !self.is_compacted() { + return 
self.clone().compact().to_protobuf(); + } PbStreamChunk { cardinality: self.cardinality() as u32, ops: self.ops.iter().map(|op| op.to_protobuf() as i32).collect(), @@ -195,7 +209,7 @@ impl StreamChunk { for column in prost.get_columns() { columns.push(ArrayImpl::from_protobuf(column, cardinality)?.into()); } - Ok(StreamChunk::new(ops, columns, None)) + Ok(StreamChunk::new(ops, columns)) } pub fn ops(&self) -> &[Op] { @@ -267,7 +281,7 @@ impl StreamChunk { } /// Reorder columns and set visibility. - pub fn project_with_vis(&self, indices: &[usize], vis: Vis) -> Self { + pub fn project_with_vis(&self, indices: &[usize], vis: Bitmap) -> Self { Self { ops: self.ops.clone(), data: self.data.project_with_vis(indices, vis), @@ -275,7 +289,7 @@ impl StreamChunk { } /// Clone the `StreamChunk` with a new visibility. - pub fn with_visibility(&self, vis: Vis) -> Self { + pub fn clone_with_vis(&self, vis: Bitmap) -> Self { Self { ops: self.ops.clone(), data: self.data.with_visibility(vis), @@ -349,7 +363,19 @@ impl OpsMut { } } + pub fn len(&self) -> usize { + match &self.state { + OpsMutState::ArcRef(v) => v.len(), + OpsMutState::Mut(v) => v.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn set(&mut self, n: usize, val: Op) { + debug_assert!(n < self.len()); if let OpsMutState::Mut(v) = &mut self.state { v[n] = val; } else { @@ -362,6 +388,14 @@ impl OpsMut { self.state = OpsMutState::Mut(v); } } + + pub fn get(&self, n: usize) -> Op { + debug_assert!(n < self.len()); + match &self.state { + OpsMutState::ArcRef(v) => v[n], + OpsMutState::Mut(v) => v[n], + } + } } impl From for Arc<[Op]> { fn from(v: OpsMut) -> Self { @@ -377,7 +411,7 @@ impl From for Arc<[Op]> { pub struct StreamChunkMut { columns: Arc<[ArrayRef]>, ops: OpsMut, - vis: VisMut, + vis: BitmapBuilder, } impl From for StreamChunkMut { @@ -394,15 +428,28 @@ impl From for StreamChunkMut { impl From for StreamChunk { fn from(c: StreamChunkMut) -> Self { - 
StreamChunk::from_parts(c.ops, DataChunk::from_parts(c.columns, c.vis.into())) + StreamChunk::from_parts(c.ops, DataChunk::from_parts(c.columns, c.vis.finish())) } } + pub struct OpRowMutRef<'a> { c: &'a mut StreamChunkMut, i: usize, } impl OpRowMutRef<'_> { + pub fn index(&self) -> usize { + self.i + } + + pub fn vis(&self) -> bool { + self.c.vis.is_set(self.i) + } + + pub fn op(&self) -> Op { + self.c.ops.get(self.i) + } + pub fn set_vis(&mut self, val: bool) { self.c.set_vis(self.i, val); } @@ -410,6 +457,15 @@ impl OpRowMutRef<'_> { pub fn set_op(&mut self, val: Op) { self.c.set_op(self.i, val); } + + pub fn row_ref(&self) -> RowRef<'_> { + RowRef::with_columns(self.c.columns(), self.i) + } + + /// return if the two row ref is in the same chunk + pub fn same_chunk(&self, other: &Self) -> bool { + std::ptr::eq(self.c, other.c) + } } impl StreamChunkMut { @@ -421,14 +477,23 @@ impl StreamChunkMut { self.ops.set(n, val); } + pub fn columns(&self) -> &[ArrayRef] { + &self.columns + } + /// get the mut reference of the stream chunk. 
- pub fn to_mut_rows(&self) -> impl Iterator> { + pub fn to_rows_mut(&mut self) -> impl Iterator, OpRowMutRef<'_>)> { unsafe { - (0..self.vis.len()).map(|i| { - let p = self as *const StreamChunkMut; - let p = p as *mut StreamChunkMut; - OpRowMutRef { c: &mut *p, i } - }) + (0..self.vis.len()) + .filter(|i| self.vis.is_set(*i)) + .map(|i| { + let p = self as *const StreamChunkMut; + let p = p as *mut StreamChunkMut; + ( + RowRef::with_columns(self.columns(), i), + OpRowMutRef { c: &mut *p, i }, + ) + }) } } } @@ -548,7 +613,7 @@ impl StreamChunkTestExt for StreamChunk { fn valid(&self) -> bool { let len = self.ops.len(); let data = &self.data; - data.vis().len() == len && data.columns().iter().all(|col| col.len() == len) + data.visibility().len() == len && data.columns().iter().all(|col| col.len() == len) } fn concat(chunks: Vec) -> StreamChunk { diff --git a/src/common/src/array/stream_chunk_iter.rs b/src/common/src/array/stream_chunk_iter.rs index 1e58fe825ffc2..82a8e9997b661 100644 --- a/src/common/src/array/stream_chunk_iter.rs +++ b/src/common/src/array/stream_chunk_iter.rs @@ -52,6 +52,18 @@ impl StreamChunk { let (row, visible) = self.data_chunk().row_at(pos); (op, row, visible) } + + pub fn rows_with_holes(&self) -> impl Iterator)>> { + self.data_chunk().rows_with_holes().map(|row| { + row.map(|row| { + ( + // SAFETY: index is checked since we are in the iterator. 
+ unsafe { *self.ops().get_unchecked(row.index()) }, + row, + ) + }) + }) + } } pub struct StreamChunkRefIter<'a> { diff --git a/src/common/src/array/struct_array.rs b/src/common/src/array/struct_array.rs index 9dfb23fe4e921..27c5d8c0dc237 100644 --- a/src/common/src/array/struct_array.rs +++ b/src/common/src/array/struct_array.rs @@ -267,7 +267,7 @@ impl From for StructArray { Self::new( StructType::unnamed(chunk.columns().iter().map(|c| c.data_type()).collect()), chunk.columns().to_vec(), - chunk.vis().to_bitmap(), + chunk.visibility().clone(), ) } } diff --git a/src/common/src/array/vis.rs b/src/common/src/array/vis.rs deleted file mode 100644 index 5cdb3bcd9ab1a..0000000000000 --- a/src/common/src/array/vis.rs +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::mem; - -use either::Either; -use itertools::repeat_n; - -use crate::buffer::{Bitmap, BitmapBuilder, BitmapIter, BitmapOnesIter}; -use crate::estimate_size::EstimateSize; - -/// `Vis` is a visibility bitmap of rows. -#[derive(Clone, PartialEq, Debug)] -pub enum Vis { - /// Non-compact variant. - /// Certain rows are hidden using this bitmap. - Bitmap(Bitmap), - - /// Compact variant which just stores cardinality of rows. - /// This can be used when all rows are visible. 
- Compact(usize), // equivalent to all ones of this size -} - -impl From for Vis { - fn from(b: Bitmap) -> Self { - Vis::Bitmap(b) - } -} - -impl From for Vis { - fn from(c: usize) -> Self { - Vis::Compact(c) - } -} - -impl From for Vis { - fn from(v: VisMut) -> Self { - match v.state { - VisMutState::Bitmap(x) => Vis::Bitmap(x), - VisMutState::Compact(x) => Vis::Compact(x), - VisMutState::Builder(x) => Vis::Bitmap(x.finish()), - } - } -} - -impl Vis { - pub fn into_mut(self) -> VisMut { - VisMut::from(self) - } - - pub fn is_empty(&self) -> bool { - self.as_ref().is_empty() - } - - pub fn len(&self) -> usize { - self.as_ref().len() - } - - /// # Panics - /// Panics if `idx > len`. - pub fn is_set(&self, idx: usize) -> bool { - self.as_ref().is_set(idx) - } - - pub fn iter(&self) -> Iter<'_> { - self.as_ref().iter() - } - - pub fn iter_ones(&self) -> OnesIter<'_> { - self.as_ref().iter_ones() - } - - #[inline(always)] - pub fn as_ref(&self) -> VisRef<'_> { - match self { - Vis::Bitmap(b) => VisRef::Bitmap(b), - Vis::Compact(c) => VisRef::Compact(*c), - } - } - - /// Returns a bitmap of this `Vis`. - pub fn to_bitmap(&self) -> Bitmap { - match self { - Vis::Bitmap(b) => b.clone(), - Vis::Compact(c) => Bitmap::ones(*c), - } - } - - /// Consumes this `Vis` and returns the inner `Bitmap` if not compact. - pub fn into_visibility(self) -> Option { - match self { - Vis::Bitmap(b) => Some(b), - Vis::Compact(_) => None, - } - } - - /// Returns a reference to the inner `Bitmap` if not compact. 
- pub fn as_visibility(&self) -> Option<&Bitmap> { - match self { - Vis::Bitmap(b) => Some(b), - Vis::Compact(_) => None, - } - } -} - -impl EstimateSize for Vis { - fn estimated_heap_size(&self) -> usize { - match self { - Vis::Bitmap(bitmap) => bitmap.estimated_heap_size(), - Vis::Compact(_) => 0, - } - } -} - -impl std::ops::BitAndAssign<&Bitmap> for Vis { - fn bitand_assign(&mut self, rhs: &Bitmap) { - match self { - Vis::Bitmap(lhs) => lhs.bitand_assign(rhs), - Vis::Compact(_) => *self = Vis::Bitmap(rhs.clone()), - } - } -} - -impl std::ops::BitAndAssign for Vis { - fn bitand_assign(&mut self, rhs: Bitmap) { - match self { - Vis::Bitmap(lhs) => lhs.bitand_assign(&rhs), - Vis::Compact(_) => *self = Vis::Bitmap(rhs), - } - } -} - -impl std::ops::BitAnd<&Bitmap> for &Vis { - type Output = Vis; - - fn bitand(self, rhs: &Bitmap) -> Self::Output { - match self { - Vis::Bitmap(lhs) => Vis::Bitmap(lhs.bitand(rhs)), - Vis::Compact(_) => Vis::Bitmap(rhs.clone()), - } - } -} - -impl<'a, 'b> std::ops::BitAnd<&'b Vis> for &'a Vis { - type Output = Vis; - - fn bitand(self, rhs: &'b Vis) -> Self::Output { - self.as_ref().bitand(rhs.as_ref()) - } -} - -impl<'a> std::ops::BitAnd for &'a Vis { - type Output = Vis; - - fn bitand(self, rhs: Vis) -> Self::Output { - self.as_ref().bitand(rhs) - } -} - -impl<'a, 'b> std::ops::BitOr<&'b Vis> for &'a Vis { - type Output = Vis; - - fn bitor(self, rhs: &'b Vis) -> Self::Output { - self.as_ref().bitor(rhs.as_ref()) - } -} - -impl<'a> std::ops::Not for &'a Vis { - type Output = Vis; - - fn not(self) -> Self::Output { - self.as_ref().not() - } -} - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum VisRef<'a> { - Bitmap(&'a Bitmap), - Compact(usize), // equivalent to all ones of this size -} - -pub type Iter<'a> = Either, itertools::RepeatN>; -pub type OnesIter<'a> = Either, std::ops::Range>; - -impl<'a> VisRef<'a> { - pub fn is_empty(self) -> bool { - match self { - VisRef::Bitmap(b) => b.is_empty(), - VisRef::Compact(c) => c == 0, - } - 
} - - pub fn len(self) -> usize { - match self { - VisRef::Bitmap(b) => b.len(), - VisRef::Compact(c) => c, - } - } - - /// # Panics - /// - /// Panics if `idx > len`. - pub fn is_set(self, idx: usize) -> bool { - match self { - VisRef::Bitmap(b) => b.is_set(idx), - VisRef::Compact(c) => { - assert!(idx <= c); - true - } - } - } - - pub fn iter(self) -> Iter<'a> { - match self { - VisRef::Bitmap(b) => Either::Left(b.iter()), - VisRef::Compact(c) => Either::Right(repeat_n(true, c)), - } - } - - pub fn iter_ones(self) -> OnesIter<'a> { - match self { - VisRef::Bitmap(b) => Either::Left(b.iter_ones()), - VisRef::Compact(c) => Either::Right(0..c), - } - } -} - -impl<'a> From<&'a Bitmap> for VisRef<'a> { - fn from(b: &'a Bitmap) -> Self { - VisRef::Bitmap(b) - } -} - -impl<'a> From for VisRef<'a> { - fn from(c: usize) -> Self { - VisRef::Compact(c) - } -} - -impl<'a> From<&'a Vis> for VisRef<'a> { - fn from(vis: &'a Vis) -> Self { - vis.as_ref() - } -} - -impl<'a, 'b> std::ops::BitAnd> for VisRef<'a> { - type Output = Vis; - - fn bitand(self, rhs: VisRef<'b>) -> Self::Output { - match (self, rhs) { - (VisRef::Bitmap(b1), VisRef::Bitmap(b2)) => Vis::Bitmap(b1 & b2), - (VisRef::Bitmap(b1), VisRef::Compact(c2)) => { - assert_eq!(b1.len(), c2); - Vis::Bitmap(b1.clone()) - } - (VisRef::Compact(c1), VisRef::Bitmap(b2)) => { - assert_eq!(c1, b2.len()); - Vis::Bitmap(b2.clone()) - } - (VisRef::Compact(c1), VisRef::Compact(c2)) => { - assert_eq!(c1, c2); - Vis::Compact(c1) - } - } - } -} - -impl<'a> std::ops::BitAnd for VisRef<'a> { - type Output = Vis; - - fn bitand(self, rhs: Vis) -> Self::Output { - match (self, rhs) { - (VisRef::Bitmap(b1), Vis::Bitmap(b2)) => Vis::Bitmap(b1 & b2), - (VisRef::Bitmap(b1), Vis::Compact(c2)) => { - assert_eq!(b1.len(), c2); - Vis::Bitmap(b1.clone()) - } - (VisRef::Compact(c1), Vis::Bitmap(b2)) => { - assert_eq!(c1, b2.len()); - Vis::Bitmap(b2) - } - (VisRef::Compact(c1), Vis::Compact(c2)) => { - assert_eq!(c1, c2); - Vis::Compact(c1) - } - } - 
} -} - -impl<'a, 'b> std::ops::BitOr> for VisRef<'a> { - type Output = Vis; - - fn bitor(self, rhs: VisRef<'b>) -> Self::Output { - match (self, rhs) { - (VisRef::Bitmap(b1), VisRef::Bitmap(b2)) => Vis::Bitmap(b1 | b2), - (VisRef::Bitmap(b1), VisRef::Compact(c2)) => { - assert_eq!(b1.len(), c2); - Vis::Compact(c2) - } - (VisRef::Compact(c1), VisRef::Bitmap(b2)) => { - assert_eq!(c1, b2.len()); - Vis::Compact(c1) - } - (VisRef::Compact(c1), VisRef::Compact(c2)) => { - assert_eq!(c1, c2); - Vis::Compact(c1) - } - } - } -} - -impl<'a> std::ops::BitOr for VisRef<'a> { - type Output = Vis; - - fn bitor(self, rhs: Vis) -> Self::Output { - // Unlike the `bitand` implementation, we can forward by ref directly here, because this - // will not introduce unnecessary clones. - self.bitor(rhs.as_ref()) - } -} - -impl<'a> std::ops::Not for VisRef<'a> { - type Output = Vis; - - fn not(self) -> Self::Output { - match self { - VisRef::Bitmap(b) => Vis::Bitmap(!b), - VisRef::Compact(c) => Vis::Bitmap(BitmapBuilder::zeroed(c).finish()), - } - } -} - -/// A mutable wrapper for `Vis`. can only set the visibilities and can not change the size. -#[derive(Debug)] -pub struct VisMut { - state: VisMutState, -} - -#[derive(Debug)] -enum VisMutState { - /// Non-compact variant. - /// Certain rows are hidden using this bitmap. - Bitmap(Bitmap), - - /// Compact variant which just stores cardinality of rows. - /// This can be used when all rows are visible. - Compact(usize), // equivalent to all ones of this size - - Builder(BitmapBuilder), -} - -impl From for VisMut { - fn from(vis: Vis) -> Self { - let state = match vis { - Vis::Bitmap(x) => VisMutState::Bitmap(x), - Vis::Compact(x) => VisMutState::Compact(x), - }; - Self { state } - } -} - -impl VisMut { - pub fn len(&self) -> usize { - match &self.state { - VisMutState::Bitmap(b) => b.len(), - VisMutState::Compact(c) => *c, - VisMutState::Builder(b) => b.len(), - } - } - - /// # Panics - /// - /// Panics if `idx >= len`. 
- pub fn is_set(&self, idx: usize) -> bool { - match &self.state { - VisMutState::Bitmap(b) => b.is_set(idx), - VisMutState::Compact(c) => { - assert!(idx < *c); - true - } - VisMutState::Builder(b) => b.is_set(idx), - } - } - - pub fn set(&mut self, n: usize, val: bool) { - if let VisMutState::Builder(b) = &mut self.state { - b.set(n, val); - } else { - let state = mem::replace(&mut self.state, VisMutState::Compact(0)); // intermediate state - let mut builder = match state { - VisMutState::Bitmap(b) => b.into(), - VisMutState::Compact(c) => BitmapBuilder::filled(c), - VisMutState::Builder(_) => unreachable!(), - }; - builder.set(n, val); - self.state = VisMutState::Builder(builder); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_vis_mut_from_compact() { - let n: usize = 128; - let vis = Vis::Compact(n); - let mut vis = vis.into_mut(); - assert_eq!(vis.len(), n); - - for i in 0..n { - assert!(vis.is_set(i), "{}", i); - } - vis.set(0, true); - for i in 0..n { - assert!(vis.is_set(i), "{}", i); - } - assert_eq!(vis.len(), n); - - let to_false = vec![1usize, 2, 14, 25, 17, 77, 62, 96]; - for i in &to_false { - vis.set(*i, false); - } - assert_eq!(vis.len(), n); - for i in 0..n { - assert_eq!(vis.is_set(i), !to_false.contains(&i), "{}", i); - } - - let vis: Vis = vis.into(); - assert_eq!(vis.len(), n); - for i in 0..n { - assert_eq!(vis.is_set(i), !to_false.contains(&i), "{}", i); - } - let count_ones = match &vis { - Vis::Bitmap(b) => b.count_ones(), - Vis::Compact(len) => *len, - }; - assert_eq!(count_ones, n - to_false.len()); - } - #[test] - fn test_vis_mut_from_bitmap() { - let zeros = 61usize; - let ones = 62usize; - let n: usize = ones + zeros; - - let mut builder = BitmapBuilder::default(); - builder.append_bitmap(&Bitmap::zeros(zeros)); - builder.append_bitmap(&Bitmap::ones(ones)); - - let vis = Vis::Bitmap(builder.finish()); - assert_eq!(vis.len(), n); - - let mut vis = vis.into_mut(); - assert_eq!(vis.len(), n); - for i in 0..n 
{ - assert_eq!(vis.is_set(i), i >= zeros, "{}", i); - } - - vis.set(0, false); - assert_eq!(vis.len(), n); - for i in 0..n { - assert_eq!(vis.is_set(i), i >= zeros, "{}", i); - } - - let toggles = vec![1usize, 2, 14, 25, 17, 77, 62, 96]; - for i in &toggles { - let i = *i; - vis.set(i, i < zeros); - } - assert_eq!(vis.len(), n); - for i in 0..zeros { - assert_eq!(vis.is_set(i), toggles.contains(&i), "{}", i); - } - for i in zeros..n { - assert_eq!(vis.is_set(i), !toggles.contains(&i), "{}", i); - } - - let vis: Vis = vis.into(); - assert_eq!(vis.len(), n); - for i in 0..zeros { - assert_eq!(vis.is_set(i), toggles.contains(&i), "{}", i); - } - for i in zeros..n { - assert_eq!(vis.is_set(i), !toggles.contains(&i), "{}", i); - } - let count_ones = match &vis { - Vis::Bitmap(b) => b.count_ones(), - Vis::Compact(len) => *len, - }; - let mut expected_ones = ones; - for i in &toggles { - let i = *i; - if i < zeros { - expected_ones += 1; - } else { - expected_ones -= 1; - } - } - assert_eq!(count_ones, expected_ones); - } -} diff --git a/src/common/src/buffer/bitmap.rs b/src/common/src/buffer/bitmap.rs index d5da545ddfe3c..e6f908556ca90 100644 --- a/src/common/src/buffer/bitmap.rs +++ b/src/common/src/buffer/bitmap.rs @@ -419,6 +419,12 @@ impl Bitmap { } } +impl From for Bitmap { + fn from(val: usize) -> Self { + Self::ones(val) + } +} + impl<'a, 'b> BitAnd<&'b Bitmap> for &'a Bitmap { type Output = Bitmap; @@ -469,6 +475,12 @@ impl BitAndAssign<&Bitmap> for Bitmap { } } +impl BitAndAssign for Bitmap { + fn bitand_assign(&mut self, rhs: Bitmap) { + *self = &*self & rhs; + } +} + impl<'a, 'b> BitOr<&'b Bitmap> for &'a Bitmap { type Output = Bitmap; diff --git a/src/common/src/cast/mod.rs b/src/common/src/cast/mod.rs index 82c69984ec0ea..21a217967830a 100644 --- a/src/common/src/cast/mod.rs +++ b/src/common/src/cast/mod.rs @@ -12,81 +12,39 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use chrono::{NaiveDate, NaiveDateTime, NaiveTime}; use itertools::Itertools; -use speedate::{Date as SpeedDate, DateTime as SpeedDateTime, Time as SpeedTime}; -use crate::types::{Date, Time, Timestamp, Timestamptz}; +use crate::types::{Timestamp, Timestamptz}; type Result = std::result::Result; -pub const PARSE_ERROR_STR_TO_TIMESTAMP: &str = "Can't cast string to timestamp (expected format is YYYY-MM-DD HH:MM:SS[.D+{up to 6 digits}] or YYYY-MM-DD HH:MM or YYYY-MM-DD or ISO 8601 format)"; -pub const PARSE_ERROR_STR_TO_TIME: &str = - "Can't cast string to time (expected format is HH:MM:SS[.D+{up to 6 digits}][Z] or HH:MM)"; -pub const PARSE_ERROR_STR_TO_DATE: &str = - "Can't cast string to date (expected format is YYYY-MM-DD)"; pub const PARSE_ERROR_STR_TO_BYTEA: &str = "Invalid Bytea syntax"; const ERROR_INT_TO_TIMESTAMP: &str = "Can't cast negative integer to timestamp"; -pub fn str_to_date(elem: &str) -> Result { - Ok(Date::new(parse_naive_date(elem)?)) -} - -pub fn str_to_time(elem: &str) -> Result
Actor IDInstanceBlock Rate
Fragment IDs → DownstreamBlock Rate
{m.metric.actor_id}{m.metric.instance}
{`Fragment ${m.metric.fragment_id} -> ${m.metric.downstream_fragment_id}`}
{ - let mut builder = S3::default(); - - // Sink will not load config from file. - builder.disable_config_load(); - - builder - .root(&self.table_root) - .bucket(&self.bucket_name) - .access_key_id(&self.config.access_key) - .secret_access_key(&self.config.secret_key); - - if let Some(region) = &self.config.region { - builder.region(region); - } - - if let Some(endpoint) = &self.config.endpoint { - builder.endpoint(endpoint); - } + let catalog = load_catalog(&self.config.build_iceberg_configs()?) + .await + .map_err(|e| SinkError::Iceberg(anyhow!("Unable to load iceberg catalog: {e}")))?; - let op = opendal::Operator::new(builder) - .map_err(|err| SinkError::Config(anyhow!("{err}")))? - .finish(); + let table_id = TableIdentifier::new(self.config.table_name.split('.')) + .map_err(|e| SinkError::Iceberg(anyhow!("Unable to parse table name: {e}")))?; - let table = Table::open_with_config(op, self.config.iceberg_table_config.clone()) + let table = catalog + .load_table(&table_id) .await - .map_err(|err| SinkError::Iceberg(anyhow!("Create table fail: {}", err)))?; + .map_err(|err| SinkError::Iceberg(anyhow!(err)))?; let sink_schema = self.param.schema(); let iceberg_schema = table @@ -177,78 +292,57 @@ impl IcebergSink { Ok(table) } - /// Parse bucket name and table root path. 
- /// - /// return (bucket name, table root path) - fn parse_bucket_and_root_from_path(config: &IcebergConfig) -> Result<(String, String)> { - let url = Url::parse(&config.path).map_err(|err| { - SinkError::Config(anyhow!( - "Fail to parse Invalid path: {}, caused by: {}", - &config.path, - err - )) - })?; - - let scheme = url.scheme(); - if scheme != "s3a" && scheme != "s3" && scheme != "s3n" { - return Err(SinkError::Config(anyhow!( - "Invalid path: {}, only support s3a,s3,s3n prefix", - &config.path - ))); - } - - let bucket = url - .host_str() - .ok_or_else(|| SinkError::Config(anyhow!("Invalid path: {}", &config.path)))?; - let root = url.path(); - - let table_root_path = if root.is_empty() { - format!("/{}/{}", config.database_name, config.table_name) + pub fn new(config: IcebergConfig, param: SinkParam) -> Result { + let unique_column_ids = if config.r#type == SINK_TYPE_UPSERT && !config.force_append_only { + if let Some(pk) = &config.primary_key { + let mut unique_column_ids = Vec::with_capacity(pk.len()); + for col_name in pk { + let id = param + .columns + .iter() + .find(|col| col.name.as_str() == col_name) + .ok_or_else(|| { + SinkError::Config(anyhow!( + "Primary key column {} not found in sink schema", + col_name + )) + })? + .column_id + .get_id() as usize; + unique_column_ids.push(id); + } + Some(unique_column_ids) + } else { + unreachable!() + } } else { - format!("{}/{}/{}", root, config.database_name, config.table_name) + None }; - - Ok((bucket.to_string(), table_root_path)) - } - - pub fn new(config: IcebergConfig, param: SinkParam) -> Result { - let (bucket_name, table_root) = Self::parse_bucket_and_root_from_path(&config)?; - // TODO(ZENOTME): Only support append-only mode now. - if !config.force_append_only { - return Err(SinkError::Iceberg(anyhow!( - "Iceberg sink only support append-only mode now." 
- ))); - } - Ok(Self { config, param, - table_root, - bucket_name, + unique_column_ids, }) } } -#[async_trait::async_trait] impl Sink for IcebergSink { type Coordinator = IcebergSinkCommitter; - type Writer = CoordinatedSinkWriter; + type LogSinker = LogSinkerOf>; + + const SINK_NAME: &'static str = ICEBERG_SINK; - async fn validate(&self, _client: Option) -> Result<()> { + async fn validate(&self) -> Result<()> { let _ = self.create_table().await?; Ok(()) } - async fn new_writer(&self, writer_param: SinkWriterParam) -> Result { + async fn new_log_sinker(&self, writer_param: SinkWriterParam) -> Result { let table = self.create_table().await?; - - let inner = IcebergWriter { - is_append_only: self.config.force_append_only, - writer: table - .task_writer() - .await - .map_err(|err| SinkError::Iceberg(anyhow!(err)))?, - table, + let inner = if let Some(unique_column_ids) = &self.unique_column_ids { + IcebergWriter::new_upsert(table, unique_column_ids.clone()).await? + } else { + IcebergWriter::new_append_only(table).await? }; Ok(CoordinatedSinkWriter::new( writer_param @@ -264,40 +358,122 @@ impl Sink for IcebergSink { })?, inner, ) - .await?) + .await? 
+ .into_log_sinker(writer_param.sink_metrics)) } - async fn new_coordinator( - &self, - _connector_client: Option, - ) -> Result { + async fn new_coordinator(&self) -> Result { let table = self.create_table().await?; + let partition_type = table.current_partition_type()?; - Ok(IcebergSinkCommitter { table }) + Ok(IcebergSinkCommitter { + table, + partition_type, + }) } } -/// TODO(ZENOTME): Just a placeholder, we will implement it later.(#10642) -pub struct IcebergWriter { - is_append_only: bool, - table: Table, - writer: icelake::io::task_writer::TaskWriter, +pub struct IcebergWriter(IcebergWriterEnum); + +enum IcebergWriterEnum { + AppendOnly(AppendOnlyWriter), + Upsert(UpsertWriter), } impl IcebergWriter { - async fn append_only_write(&mut self, chunk: StreamChunk) -> Result<()> { + pub async fn new_append_only(table: Table) -> Result { + Ok(Self(IcebergWriterEnum::AppendOnly( + AppendOnlyWriter::new(table).await?, + ))) + } + + pub async fn new_upsert(table: Table, unique_column_ids: Vec) -> Result { + Ok(Self(IcebergWriterEnum::Upsert( + UpsertWriter::new(table, unique_column_ids).await?, + ))) + } +} + +#[async_trait] +impl SinkWriter for IcebergWriter { + type CommitMetadata = Option; + + /// Begin a new epoch + async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { + // Just skip it. + Ok(()) + } + + /// Write a stream chunk to sink + async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { + match &mut self.0 { + IcebergWriterEnum::AppendOnly(writer) => writer.write(chunk).await?, + IcebergWriterEnum::Upsert(writer) => writer.write(chunk).await?, + } + Ok(()) + } + + /// Receive a barrier and mark the end of current epoch. When `is_checkpoint` is true, the sink + /// writer should commit the current epoch. 
+ async fn barrier(&mut self, is_checkpoint: bool) -> Result> { + // Skip it if not checkpoint + if !is_checkpoint { + return Ok(None); + } + + let res = match &mut self.0 { + IcebergWriterEnum::AppendOnly(writer) => writer.flush().await?, + IcebergWriterEnum::Upsert(writer) => writer.flush().await?, + }; + + Ok(Some(SinkMetadata::try_from(&res)?)) + } + + /// Clean up + async fn abort(&mut self) -> Result<()> { + // TODO: abort should clean up all the data written in this epoch. + Ok(()) + } +} + +struct AppendOnlyWriter { + table: Table, + writer: icelake::io::task_writer::TaskWriter, + schema: SchemaRef, +} + +impl AppendOnlyWriter { + pub async fn new(table: Table) -> Result { + let schema = Arc::new( + table + .current_table_metadata() + .current_schema() + .map_err(|err| SinkError::Iceberg(anyhow!(err)))? + .clone() + .try_into() + .map_err(|err: icelake::Error| SinkError::Iceberg(anyhow!(err)))?, + ); + + Ok(Self { + writer: table + .writer_builder() + .await? + .build_task_writer() + .await + .map_err(|err| SinkError::Iceberg(anyhow!(err)))?, + table, + schema, + }) + } + + pub async fn write(&mut self, chunk: StreamChunk) -> Result<()> { let (mut chunk, ops) = chunk.into_parts(); let filters = - Bitmap::from_bool_slice(&ops.iter().map(|op| *op == Op::Insert).collect_vec()); - let filters = if let Some(ori_vis) = chunk.visibility() { - ori_vis & &filters - } else { - filters - }; + chunk.visibility() & ops.iter().map(|op| *op == Op::Insert).collect::(); chunk.set_visibility(filters); - let chunk = RecordBatch::try_from(&chunk.compact()) + let chunk = to_record_batch_with_schema(self.schema.clone(), &chunk.compact()) .map_err(|err| SinkError::Iceberg(anyhow!(err)))?; self.writer.write(&chunk).await.map_err(|err| { @@ -306,32 +482,308 @@ impl IcebergWriter { Ok(()) } + + pub async fn flush(&mut self) -> Result { + let old_writer = std::mem::replace( + &mut self.writer, + self.table + .writer_builder() + .await? 
+ .build_task_writer() + .await + .map_err(|err| SinkError::Iceberg(anyhow!(err)))?, + ); + + let data_files = old_writer + .close() + .await + .map_err(|err| SinkError::Iceberg(anyhow!("Close writer fail: {}", err)))?; + + Ok(WriteResult { + data_files, + delete_files: vec![], + }) + } +} + +struct UpsertWriter { + writer: UpsertWriterInner, + schema: SchemaRef, +} +enum UpsertWriterInner { + Partition(PartitionDeltaWriter), + Unpartition(UnpartitionDeltaWriter), +} + +impl UpsertWriter { + pub async fn new(table: Table, unique_column_ids: Vec) -> Result { + let schema = Arc::new( + table + .current_table_metadata() + .current_schema() + .map_err(|err| SinkError::Iceberg(anyhow!(err)))? + .clone() + .try_into() + .map_err(|err: icelake::Error| SinkError::Iceberg(anyhow!(err)))?, + ); + let inner = if let Some(partition_splitter) = table.partition_splitter()? { + UpsertWriterInner::Partition(PartitionDeltaWriter::new( + table, + partition_splitter, + unique_column_ids, + )) + } else { + UpsertWriterInner::Unpartition( + UnpartitionDeltaWriter::new(table, unique_column_ids).await?, + ) + }; + Ok(Self { + writer: inner, + schema, + }) + } + + fn partition_ops(ops: &[Op]) -> Vec<(usize, usize)> { + assert!(!ops.is_empty()); + let mut res = vec![]; + let mut start = 0; + let mut prev_op = ops[0]; + for (i, op) in ops.iter().enumerate().skip(1) { + if *op != prev_op { + res.push((start, i)); + start = i; + prev_op = *op; + } + } + res.push((start, ops.len())); + res + } + + pub async fn write(&mut self, chunk: StreamChunk) -> Result<()> { + let (chunk, ops) = chunk.compact().into_parts(); + if ops.len() == 0 { + return Ok(()); + } + let chunk = to_record_batch_with_schema(self.schema.clone(), &chunk.compact()) + .map_err(|err| SinkError::Iceberg(anyhow!(err)))?; + let ranges = Self::partition_ops(&ops); + for (start, end) in ranges { + let batch = chunk.slice(start, end - start); + match ops[start] { + Op::UpdateInsert | Op::Insert => match &mut self.writer { + 
UpsertWriterInner::Partition(writer) => writer.write(batch).await?, + UpsertWriterInner::Unpartition(writer) => writer.write(batch).await?, + }, + + Op::UpdateDelete | Op::Delete => match &mut self.writer { + UpsertWriterInner::Partition(writer) => writer.delete(batch).await?, + UpsertWriterInner::Unpartition(writer) => writer.delete(batch).await?, + }, + } + } + Ok(()) + } + + pub async fn flush(&mut self) -> Result { + match &mut self.writer { + UpsertWriterInner::Partition(writer) => { + let mut data_files = vec![]; + let mut delete_files = vec![]; + for res in writer.flush().await? { + data_files.extend(res.data); + delete_files.extend(res.pos_delete); + delete_files.extend(res.eq_delete); + } + Ok(WriteResult { + data_files, + delete_files, + }) + } + UpsertWriterInner::Unpartition(writer) => { + let res = writer.flush().await?; + let delete_files = res.pos_delete.into_iter().chain(res.eq_delete).collect(); + Ok(WriteResult { + data_files: res.data, + delete_files, + }) + } + } + } +} + +struct UnpartitionDeltaWriter { + table: Table, + writer: icelake::io::file_writer::EqualityDeltaWriter, + unique_column_ids: Vec, +} + +impl UnpartitionDeltaWriter { + pub async fn new(table: Table, unique_column_ids: Vec) -> Result { + Ok(Self { + writer: table + .writer_builder() + .await? + .build_equality_delta_writer(unique_column_ids.clone()) + .await?, + table, + unique_column_ids, + }) + } + + pub async fn write(&mut self, batch: RecordBatch) -> Result<()> { + self.writer.write(batch).await?; + Ok(()) + } + + pub async fn delete(&mut self, batch: RecordBatch) -> Result<()> { + self.writer.delete(batch).await?; + Ok(()) + } + + pub async fn flush(&mut self) -> Result { + let writer = std::mem::replace( + &mut self.writer, + self.table + .writer_builder() + .await? + .build_equality_delta_writer(self.unique_column_ids.clone()) + .await?, + ); + Ok(writer.close(None).await?) 
+ } } +struct PartitionDeltaWriter { + table: Table, + writers: HashMap< + icelake::types::PartitionKey, + icelake::io::file_writer::EqualityDeltaWriter, + >, + partition_splitter: icelake::types::PartitionSplitter, + unique_column_ids: Vec, +} + +impl PartitionDeltaWriter { + pub fn new( + table: Table, + partition_splitter: icelake::types::PartitionSplitter, + unique_column_ids: Vec, + ) -> Self { + Self { + table, + writers: HashMap::new(), + partition_splitter, + unique_column_ids, + } + } + + pub async fn write(&mut self, batch: RecordBatch) -> Result<()> { + let partitions = self.partition_splitter.split_by_partition(&batch)?; + for (partition_key, batch) in partitions { + match self.writers.entry(partition_key) { + Entry::Vacant(v) => { + v.insert( + self.table + .writer_builder() + .await? + .build_equality_delta_writer(self.unique_column_ids.clone()) + .await?, + ) + .write(batch) + .await? + } + Entry::Occupied(mut v) => v.get_mut().write(batch).await?, + } + } + Ok(()) + } + + pub async fn delete(&mut self, batch: RecordBatch) -> Result<()> { + let partitions = self.partition_splitter.split_by_partition(&batch)?; + for (partition_key, batch) in partitions { + match self.writers.entry(partition_key) { + Entry::Vacant(v) => { + v.insert( + self.table + .writer_builder() + .await? 
+ .build_equality_delta_writer(self.unique_column_ids.clone()) + .await?, + ) + .delete(batch) + .await + .unwrap(); + } + Entry::Occupied(mut v) => v.get_mut().delete(batch).await.unwrap(), + } + } + Ok(()) + } + + pub async fn flush(&mut self) -> Result> { + let mut res = Vec::with_capacity(self.writers.len()); + for (partition_key, writer) in self.writers.drain() { + let partition_value = self + .partition_splitter + .convert_key_to_value(partition_key)?; + let delta_result = writer.close(Some(partition_value)).await?; + res.push(delta_result); + } + Ok(res) + } +} + +const DATA_FILES: &str = "data_files"; +const DELETE_FILES: &str = "delete_files"; + #[derive(Default, Debug)] struct WriteResult { data_files: Vec, + delete_files: Vec, } -impl<'a> TryFrom<&'a SinkMetadata> for WriteResult { - type Error = SinkError; - - fn try_from(value: &'a SinkMetadata) -> std::result::Result { +impl WriteResult { + fn try_from(value: &SinkMetadata, partition_type: &Any) -> Result { if let Some(Serialized(v)) = &value.metadata { - if let Value::Array(json_values) = + let mut values = if let serde_json::Value::Object(v) = serde_json::from_slice::(&v.metadata).map_err( |e| -> SinkError { anyhow!("Can't parse iceberg sink metadata: {}", e).into() }, - )? + )? { + v + } else { + return Err(anyhow!("iceberg sink metadata should be a object").into()); + }; + + let data_files: Vec; + let delete_files: Vec; + if let serde_json::Value::Array(values) = values + .remove(DATA_FILES) + .ok_or_else(|| anyhow!("icberg sink metadata should have data_files object"))? 
{ - let data_files = json_values + data_files = values .into_iter() - .map(data_file_from_json) + .map(|value| data_file_from_json(value, partition_type.clone())) + .collect::, icelake::Error>>() + .unwrap(); + } else { + return Err(anyhow!("icberg sink metadata should have data_files object").into()); + } + if let serde_json::Value::Array(values) = values + .remove(DELETE_FILES) + .ok_or_else(|| anyhow!("icberg sink metadata should have data_files object"))? + { + delete_files = values + .into_iter() + .map(|value| data_file_from_json(value, partition_type.clone())) .collect::, icelake::Error>>() .map_err(|e| anyhow!("Failed to parse data file from json: {}", e))?; - Ok(WriteResult { data_files }) } else { - Err(anyhow!("Serialized data files should be json array!").into()) + return Err(anyhow!("icberg sink metadata should have data_files object").into()); } + Ok(Self { + data_files, + delete_files, + }) } else { Err(anyhow!("Can't create iceberg sink write result from empty data!").into()) } @@ -342,7 +794,7 @@ impl<'a> TryFrom<&'a WriteResult> for SinkMetadata { type Error = SinkError; fn try_from(value: &'a WriteResult) -> std::result::Result { - let json_value = serde_json::Value::Array( + let json_data_files = serde_json::Value::Array( value .data_files .iter() @@ -351,6 +803,23 @@ impl<'a> TryFrom<&'a WriteResult> for SinkMetadata { .collect::, icelake::Error>>() .map_err(|e| anyhow!("Can't serialize data files to json: {}", e))?, ); + let json_delete_files = serde_json::Value::Array( + value + .delete_files + .iter() + .cloned() + .map(data_file_to_json) + .collect::, icelake::Error>>() + .map_err(|e| anyhow!("Can't serialize data files to json: {}", e))?, + ); + let json_value = serde_json::Value::Object( + vec![ + (DATA_FILES.to_string(), json_data_files), + (DELETE_FILES.to_string(), json_delete_files), + ] + .into_iter() + .collect(), + ); Ok(SinkMetadata { metadata: Some(Serialized(SerializedMetadata { metadata: 
serde_json::to_vec(&json_value).map_err(|e| -> SinkError { @@ -361,60 +830,9 @@ impl<'a> TryFrom<&'a WriteResult> for SinkMetadata { } } -#[async_trait] -impl SinkWriter for IcebergWriter { - type CommitMetadata = Option; - - /// Begin a new epoch - async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { - // Just skip it. - Ok(()) - } - - /// Write a stream chunk to sink - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { - if self.is_append_only { - self.append_only_write(chunk).await - } else { - return Err(SinkError::Iceberg(anyhow!( - "Iceberg sink only support append-only mode now." - ))); - } - } - - /// Receive a barrier and mark the end of current epoch. When `is_checkpoint` is true, the sink - /// writer should commit the current epoch. - async fn barrier(&mut self, is_checkpoint: bool) -> Result> { - // Skip it if not checkpoint - if !is_checkpoint { - return Ok(None); - } - - let old_writer = std::mem::replace( - &mut self.writer, - self.table - .task_writer() - .await - .map_err(|err| SinkError::Iceberg(anyhow!(err)))?, - ); - - let data_files = old_writer - .close() - .await - .map_err(|err| SinkError::Iceberg(anyhow!("Close writer fail: {}", err)))?; - - Ok(Some(SinkMetadata::try_from(&WriteResult { data_files })?)) - } - - /// Clean up - async fn abort(&mut self) -> Result<()> { - // TODO: abort should clean up all the data written in this epoch. 
- Ok(()) - } -} - pub struct IcebergSinkCommitter { table: Table, + partition_type: Any, } #[async_trait::async_trait] @@ -429,15 +847,14 @@ impl SinkCommitCoordinator for IcebergSinkCommitter { let write_results = metadata .iter() - .map(WriteResult::try_from) + .map(|meta| WriteResult::try_from(meta, &self.partition_type)) .collect::>>()?; let mut txn = Transaction::new(&mut self.table); - txn.append_file( - write_results - .into_iter() - .flat_map(|s| s.data_files.into_iter()), - ); + write_results.into_iter().for_each(|s| { + txn.append_data_file(s.data_files); + txn.append_delete_file(s.delete_files); + }); txn.commit() .await .map_err(|err| SinkError::Iceberg(anyhow!(err)))?; diff --git a/src/connector/src/sink/kafka.rs b/src/connector/src/sink/kafka.rs index 4c7fc317edd3e..f77b2b0a88c36 100644 --- a/src/connector/src/sink/kafka.rs +++ b/src/connector/src/sink/kafka.rs @@ -15,46 +15,39 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::Duration; use anyhow::anyhow; -use futures_async_stream::for_await; -use rdkafka::error::{KafkaError, KafkaResult}; +use futures::{Future, FutureExt, TryFuture}; +use rdkafka::error::KafkaError; use rdkafka::message::ToBytes; use rdkafka::producer::{DeliveryFuture, FutureProducer, FutureRecord}; use rdkafka::types::RDKafkaErrorCode; use rdkafka::ClientConfig; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; -use risingwave_rpc_client::ConnectorClient; use serde_derive::{Deserialize, Serialize}; -use serde_json::Value; use serde_with::{serde_as, DisplayFromStr}; +use strum_macros::{Display, EnumString}; -use super::{ - Sink, SinkError, SinkParam, SINK_TYPE_APPEND_ONLY, SINK_TYPE_DEBEZIUM, SINK_TYPE_OPTION, - SINK_TYPE_UPSERT, -}; +use super::catalog::{SinkFormat, SinkFormatDesc}; +use super::{Sink, SinkError, SinkParam}; use crate::common::KafkaCommon; -use crate::sink::utils::{ - 
gen_append_only_message_stream, gen_debezium_message_stream, gen_upsert_message_stream, - AppendOnlyAdapterOpts, DebeziumAdapterOpts, UpsertAdapterOpts, -}; -use crate::sink::{ - DummySinkCommitCoordinator, Result, SinkWriterParam, SinkWriterV1, SinkWriterV1Adapter, +use crate::sink::catalog::desc::SinkDesc; +use crate::sink::formatter::SinkFormatterImpl; +use crate::sink::log_store::DeliveryFutureManagerAddFuture; +use crate::sink::writer::{ + AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt, FormattedSink, }; +use crate::sink::{DummySinkCommitCoordinator, Result, SinkWriterParam}; use crate::source::kafka::{KafkaProperties, KafkaSplitEnumerator, PrivateLinkProducerContext}; use crate::source::{SourceEnumeratorContext, SplitEnumerator}; use crate::{ - deserialize_bool_from_string, deserialize_duration_from_string, deserialize_u32_from_string, + deserialize_duration_from_string, deserialize_u32_from_string, dispatch_sink_formatter_impl, }; pub const KAFKA_SINK: &str = "kafka"; -const fn _default_timeout() -> Duration { - Duration::from_secs(5) -} - const fn _default_max_retries() -> u32 { 3 } @@ -63,17 +56,34 @@ const fn _default_retry_backoff() -> Duration { Duration::from_millis(100) } -const fn _default_use_transaction() -> bool { - false +const fn _default_message_timeout_ms() -> usize { + 5000 +} + +const fn _default_max_in_flight_requests_per_connection() -> usize { + 5 } -const fn _default_force_append_only() -> bool { - false +#[derive(Debug, Clone, PartialEq, Display, Serialize, Deserialize, EnumString)] +#[strum(serialize_all = "snake_case")] +enum CompressionCodec { + None, + Gzip, + Snappy, + Lz4, + Zstd, } +/// See +/// for the detailed meaning of these librdkafka producer properties #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RdKafkaPropertiesProducer { + /// Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. 
+ #[serde(rename = "properties.allow.auto.create.topics")] + #[serde_as(as = "Option")] + pub allow_auto_create_topics: Option, + /// Maximum number of messages allowed on the producer queue. This queue is shared by all /// topics and partitions. A value of 0 disables this limit. #[serde(rename = "properties.queue.buffering.max.messages")] @@ -126,10 +136,36 @@ pub struct RdKafkaPropertiesProducer { #[serde(rename = "properties.batch.size")] #[serde_as(as = "Option")] batch_size: Option, + + /// Compression codec to use for compressing message sets. + #[serde(rename = "properties.compression.codec")] + #[serde_as(as = "Option")] + compression_codec: Option, + + /// Produce message timeout. + /// This value is used to limits the time a produced message waits for + /// successful delivery (including retries). + #[serde( + rename = "properties.message.timeout.ms", + default = "_default_message_timeout_ms" + )] + #[serde_as(as = "DisplayFromStr")] + message_timeout_ms: usize, + + /// The maximum number of unacknowledged requests the client will send on a single connection before blocking. 
+ #[serde( + rename = "properties.max.in.flight.requests.per.connection", + default = "_default_max_in_flight_requests_per_connection" + )] + #[serde_as(as = "DisplayFromStr")] + max_in_flight_requests_per_connection: usize, } impl RdKafkaPropertiesProducer { pub(crate) fn set_client(&self, c: &mut rdkafka::ClientConfig) { + if let Some(v) = self.allow_auto_create_topics { + c.set("allow.auto.create.topics", v.to_string()); + } if let Some(v) = self.queue_buffering_max_messages { c.set("queue.buffering.max.messages", v.to_string()); } @@ -154,6 +190,14 @@ impl RdKafkaPropertiesProducer { if let Some(v) = self.batch_size { c.set("batch.size", v.to_string()); } + if let Some(v) = &self.compression_codec { + c.set("compression.codec", v.to_string()); + } + c.set("message.timeout.ms", self.message_timeout_ms.to_string()); + c.set( + "max.in.flight.requests.per.connection", + self.max_in_flight_requests_per_connection.to_string(), + ); } } @@ -168,21 +212,6 @@ pub struct KafkaConfig { #[serde(flatten)] pub common: KafkaCommon, - pub r#type: String, // accept "append-only", "debezium", or "upsert" - - #[serde( - default = "_default_force_append_only", - deserialize_with = "deserialize_bool_from_string" - )] - pub force_append_only: bool, - - #[serde( - rename = "properties.timeout", - default = "_default_timeout", - deserialize_with = "deserialize_duration_from_string" - )] - pub timeout: Duration, - #[serde( rename = "properties.retry.max", default = "_default_max_retries", @@ -197,12 +226,6 @@ pub struct KafkaConfig { )] pub retry_interval: Duration, - #[serde( - default = "_default_use_transaction", - deserialize_with = "deserialize_bool_from_string" - )] - pub use_transaction: bool, - /// We have parsed the primary key for an upsert kafka sink into a `usize` vector representing /// the indices of the pk columns in the frontend, so we simply store the primary key here /// as a string. 
@@ -217,18 +240,6 @@ impl KafkaConfig { let config = serde_json::from_value::(serde_json::to_value(values).unwrap()) .map_err(|e| SinkError::Config(anyhow!(e)))?; - if config.r#type != SINK_TYPE_APPEND_ONLY - && config.r#type != SINK_TYPE_DEBEZIUM - && config.r#type != SINK_TYPE_UPSERT - { - return Err(SinkError::Config(anyhow!( - "`{}` must be {}, {}, or {}", - SINK_TYPE_OPTION, - SINK_TYPE_APPEND_ONLY, - SINK_TYPE_DEBEZIUM, - SINK_TYPE_UPSERT - ))); - } Ok(config) } @@ -260,95 +271,124 @@ pub struct KafkaSink { pub config: KafkaConfig, schema: Schema, pk_indices: Vec, - is_append_only: bool, + format_desc: SinkFormatDesc, db_name: String, sink_from_name: String, } -impl KafkaSink { - pub fn new(config: KafkaConfig, param: SinkParam) -> Self { - Self { +impl TryFrom for KafkaSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + let schema = param.schema(); + let config = KafkaConfig::from_hashmap(param.properties)?; + Ok(Self { config, - schema: param.schema(), - pk_indices: param.pk_indices, - is_append_only: param.sink_type.is_append_only(), + schema, + pk_indices: param.downstream_pk, + format_desc: param + .format_desc + .ok_or_else(|| SinkError::Config(anyhow!("missing FORMAT ... 
ENCODE ...")))?, db_name: param.db_name, sink_from_name: param.sink_from_name, - } + }) } } -#[async_trait::async_trait] impl Sink for KafkaSink { type Coordinator = DummySinkCommitCoordinator; - type Writer = SinkWriterV1Adapter; - - async fn new_writer(&self, writer_param: SinkWriterParam) -> Result { - Ok(SinkWriterV1Adapter::new( - KafkaSinkWriter::new( - self.config.clone(), - self.schema.clone(), - self.pk_indices.clone(), - self.is_append_only, - self.db_name.clone(), - self.sink_from_name.clone(), - format!("sink-{:?}", writer_param.executor_id), - ) - .await?, - )) + type LogSinker = AsyncTruncateLogSinkerOf; + + const SINK_NAME: &'static str = KAFKA_SINK; + + fn default_sink_decouple(desc: &SinkDesc) -> bool { + desc.sink_type.is_append_only() + } + + async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result { + let formatter = SinkFormatterImpl::new( + &self.format_desc, + self.schema.clone(), + self.pk_indices.clone(), + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await?; + let max_delivery_buffer_size = (self + .config + .rdkafka_properties + .queue_buffering_max_messages + .as_ref() + .cloned() + .unwrap_or(KAFKA_WRITER_MAX_QUEUE_SIZE) as f32 + * KAFKA_WRITER_MAX_QUEUE_SIZE_RATIO) as usize; + + Ok(KafkaSinkWriter::new(self.config.clone(), formatter) + .await? + .into_log_sinker(max_delivery_buffer_size)) } - async fn validate(&self, _client: Option) -> Result<()> { + async fn validate(&self) -> Result<()> { // For upsert Kafka sink, the primary key must be defined. 
- if !self.is_append_only && self.pk_indices.is_empty() { + if self.format_desc.format != SinkFormat::AppendOnly && self.pk_indices.is_empty() { return Err(SinkError::Config(anyhow!( - "primary key not defined for {} kafka sink (please define in `primary_key` field)", - self.config.r#type + "primary key not defined for {:?} kafka sink (please define in `primary_key` field)", + self.format_desc.format ))); } + // Check for formatter constructor error, before it is too late for error reporting. + SinkFormatterImpl::new( + &self.format_desc, + self.schema.clone(), + self.pk_indices.clone(), + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await?; // Try Kafka connection. // There is no such interface for kafka producer to validate a connection // use enumerator to validate broker reachability and existence of topic - let mut ticker = KafkaSplitEnumerator::new( + let check = KafkaSplitEnumerator::new( KafkaProperties::from(self.config.clone()), Arc::new(SourceEnumeratorContext::default()), ) .await?; - _ = ticker.list_splits().await?; + if !check.check_reachability().await { + return Err(SinkError::Config(anyhow!( + "cannot connect to kafka broker ({})", + self.config.common.brokers + ))); + } Ok(()) } } -#[derive(Debug, Clone, PartialEq, enum_as_inner::EnumAsInner)] -enum KafkaSinkState { - Init, - // State running with epoch. - Running(u64), +/// When the `DeliveryFuture` the current `future_delivery_buffer` +/// is buffering is greater than `queue_buffering_max_messages` * `KAFKA_WRITER_MAX_QUEUE_SIZE_RATIO`, +/// then enforcing commit once +const KAFKA_WRITER_MAX_QUEUE_SIZE_RATIO: f32 = 1.2; +/// The default queue size used to enforce a commit in kafka producer if `queue.buffering.max.messages` is not specified. +/// This default value is determined based on the librdkafka default. 
See the following doc for more details: +/// +const KAFKA_WRITER_MAX_QUEUE_SIZE: usize = 100000; + +struct KafkaPayloadWriter<'a> { + inner: &'a FutureProducer, + add_future: DeliveryFutureManagerAddFuture<'a, KafkaSinkDeliveryFuture>, + config: &'a KafkaConfig, } +pub type KafkaSinkDeliveryFuture = impl TryFuture + Unpin + 'static; + pub struct KafkaSinkWriter { - pub config: KafkaConfig, - pub inner: FutureProducer, - identifier: String, - state: KafkaSinkState, - schema: Schema, - pk_indices: Vec, - is_append_only: bool, - db_name: String, - sink_from_name: String, + formatter: SinkFormatterImpl, + inner: FutureProducer, + config: KafkaConfig, } impl KafkaSinkWriter { - pub async fn new( - mut config: KafkaConfig, - schema: Schema, - pk_indices: Vec, - is_append_only: bool, - db_name: String, - sink_from_name: String, - identifier: String, - ) -> Result { + async fn new(config: KafkaConfig, formatter: SinkFormatterImpl) -> Result { let inner: FutureProducer = { let mut c = ClientConfig::new(); @@ -357,10 +397,7 @@ impl KafkaSinkWriter { config.set_client(&mut c); // ClientConfig configuration - c.set("bootstrap.servers", &config.common.brokers) - .set("message.timeout.ms", "5000"); - // Note that we will not use transaction during sinking, thus set it to false - config.use_transaction = false; + c.set("bootstrap.servers", &config.common.brokers); // Create the producer context, will be used to create the producer let producer_ctx = PrivateLinkProducerContext::new( @@ -375,197 +412,146 @@ impl KafkaSinkWriter { }; Ok(KafkaSinkWriter { - config: config.clone(), + formatter, inner, - identifier, - state: KafkaSinkState::Init, - schema, - pk_indices, - is_append_only, - db_name, - sink_from_name, + config: config.clone(), }) } +} - /// The wrapper function for the actual `FutureProducer::send_result` - /// Just for better error handling purpose - #[expect(clippy::unused_async)] - async fn send_result_inner<'a, K, P>( - &'a self, - record: FutureRecord<'a, K, P>, - ) 
-> core::result::Result)> - where - K: ToBytes + ?Sized, - P: ToBytes + ?Sized, - { - self.inner.send_result(record) +impl AsyncTruncateSinkWriter for KafkaSinkWriter { + type DeliveryFuture = KafkaSinkDeliveryFuture; + + async fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> Result<()> { + let mut payload_writer = KafkaPayloadWriter { + inner: &mut self.inner, + add_future, + config: &self.config, + }; + dispatch_sink_formatter_impl!(&self.formatter, formatter, { + payload_writer.write_chunk(chunk, formatter).await + }) } +} +impl<'w> KafkaPayloadWriter<'w> { /// The actual `send_result` function, will be called when the `KafkaSinkWriter` needs to sink /// messages - async fn send_result<'a, K, P>(&'a self, mut record: FutureRecord<'a, K, P>) -> KafkaResult<()> + async fn send_result<'a, K, P>(&'a mut self, mut record: FutureRecord<'a, K, P>) -> Result<()> where K: ToBytes + ?Sized, P: ToBytes + ?Sized, { - // The error to be returned - let mut err = KafkaError::Canceled; - - for _ in 0..self.config.max_retry_num { - match self.send_result_inner(record).await { - Ok(delivery_future) => match delivery_future.await { - Ok(delivery_future_result) => match delivery_future_result { - // Successfully sent the record - // Will return the partition and offset of the message (i32, i64) - Ok(_) => return Ok(()), - // If the message failed to be delivered. (i.e., flush) - // The error & the copy of the original message will be returned - // i.e., (KafkaError, OwnedMessage) - // We will just stop the loop, and return the error - // The sink executor will back to the latest checkpoint - Err((k_err, _msg)) => { - err = k_err; - break; - } - }, - // Nothing to do here, since the err has already been set to - // KafkaError::Canceled. 
This represents the producer is dropped - // before the delivery status is received - Err(_) => break, - }, + let mut success_flag = false; + + let mut ret = Ok(()); + + for i in 0..self.config.max_retry_num { + match self.inner.send_result(record) { + Ok(delivery_future) => { + if self + .add_future + .add_future_may_await(Self::map_delivery_future(delivery_future)) + .await? + { + tracing::warn!( + "Number of records being delivered ({}) >= expected kafka producer queue size ({}). + This indicates the default value of queue.buffering.max.messages has changed.", + self.add_future.future_count(), + self.add_future.max_future_count() + ); + } + success_flag = true; + break; + } // The enqueue buffer is full, `send_result` will immediately return // We can retry for another round after sleeping for sometime Err((e, rec)) => { - err = e; + tracing::warn!( + "producing message (key {:?}) to topic {} failed, err {:?}.", + rec.key.map(|k| k.to_bytes()), + rec.topic, + e + ); record = rec; - match err { + match e { KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => { - tokio::time::sleep(self.config.retry_interval).await; + tracing::warn!( + "Producer queue full. Delivery future buffer size={}. Await and retry #{}", + self.add_future.future_count(), + i + ); + self.add_future.await_one_delivery().await?; continue; } - _ => break, + _ => return Err(e.into()), } } } } - Err(err) - } - - async fn write_json_objects( - &self, - event_key_object: Option, - event_object: Option, - ) -> Result<()> { - // here we assume the key part always exists and value part is optional. - // if value is None, we will skip the payload part. 
- let key_str = event_key_object.unwrap().to_string(); - let mut record = FutureRecord::<[u8], [u8]>::to(self.config.common.topic.as_str()) - .key(key_str.as_bytes()); - let payload; - if let Some(value) = event_object { - payload = value.to_string(); - record = record.payload(payload.as_bytes()); + if !success_flag { + // In this case, after trying `max_retry_num` + // The enqueue buffer is still full + ret = Err(KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull).into()); } - self.send_result(record).await?; - Ok(()) - } - async fn debezium_update(&self, chunk: StreamChunk, ts_ms: u64) -> Result<()> { - let dbz_stream = gen_debezium_message_stream( - &self.schema, - &self.pk_indices, - chunk, - ts_ms, - DebeziumAdapterOpts::default(), - &self.db_name, - &self.sink_from_name, - ); - - #[for_await] - for msg in dbz_stream { - let (event_key_object, event_object) = msg?; - self.write_json_objects(event_key_object, event_object) - .await?; - } - Ok(()) + ret } - async fn upsert(&self, chunk: StreamChunk) -> Result<()> { - let upsert_stream = gen_upsert_message_stream( - &self.schema, - &self.pk_indices, - chunk, - UpsertAdapterOpts::default(), - ); - - #[for_await] - for msg in upsert_stream { - let (event_key_object, event_object) = msg?; - self.write_json_objects(event_key_object, event_object) - .await?; + async fn write_inner( + &mut self, + event_key_object: Option>, + event_object: Option>, + ) -> Result<()> { + let topic = self.config.common.topic.clone(); + let mut record = FutureRecord::<[u8], [u8]>::to(topic.as_str()); + if let Some(key_str) = &event_key_object { + record = record.key(key_str); } - Ok(()) - } - - async fn append_only(&self, chunk: StreamChunk) -> Result<()> { - let append_only_stream = gen_append_only_message_stream( - &self.schema, - &self.pk_indices, - chunk, - AppendOnlyAdapterOpts::default(), - ); - - #[for_await] - for msg in append_only_stream { - let (event_key_object, event_object) = msg?; - 
self.write_json_objects(event_key_object, event_object) - .await?; + if let Some(payload) = &event_object { + record = record.payload(payload); } + // Send the data but not wait it to finish sinking + // Will join all `DeliveryFuture` during commit + self.send_result(record).await?; Ok(()) } -} -#[async_trait::async_trait] -impl SinkWriterV1 for KafkaSinkWriter { - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { - if self.is_append_only { - // Append-only - self.append_only(chunk).await - } else { - // Debezium - if self.config.r#type == SINK_TYPE_DEBEZIUM { - self.debezium_update( - chunk, - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - ) - .await - } else { - // Upsert - self.upsert(chunk).await - } + fn map_future_result(delivery_future_result: ::Output) -> Result<()> { + match delivery_future_result { + // Successfully sent the record + // Will return the partition and offset of the message (i32, i64) + // Note that `Vec<()>` won't cause memory allocation + Ok(Ok(_)) => Ok(()), + // If the message failed to be delivered. 
(i.e., flush) + // The error & the copy of the original message will be returned + // i.e., (KafkaError, OwnedMessage) + // We will just stop the loop, and return the error + // The sink executor will back to the latest checkpoint + Ok(Err((k_err, _msg))) => Err(k_err.into()), + // This represents the producer is dropped + // before the delivery status is received + // Return `KafkaError::Canceled` + Err(_) => Err(KafkaError::Canceled.into()), } } - /// --------------------------------------------------------------------------------------- - /// Note: The following functions are just to satisfy `SinkWriterV1` trait | - /// We do not need transaction-related functionality for sink executor, return Ok(()) | - /// --------------------------------------------------------------------------------------- - // Note that epoch 0 is reserved for initializing, so we should not use epoch 0 for - // transaction. - async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { - Ok(()) + fn map_delivery_future(future: DeliveryFuture) -> KafkaSinkDeliveryFuture { + future.map(KafkaPayloadWriter::<'static>::map_future_result) } +} - async fn commit(&mut self) -> Result<()> { - Ok(()) - } +impl<'a> FormattedSink for KafkaPayloadWriter<'a> { + type K = Vec; + type V = Vec; - async fn abort(&mut self) -> Result<()> { - Ok(()) + async fn write_one(&mut self, k: Option, v: Option) -> Result<()> { + self.write_inner(k, v).await } } @@ -573,11 +559,11 @@ impl SinkWriterV1 for KafkaSinkWriter { mod test { use maplit::hashmap; use risingwave_common::catalog::Field; - use risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::DataType; use super::*; - use crate::sink::utils::*; + use crate::sink::encoder::{JsonEncoder, TimestampHandlingMode}; + use crate::sink::formatter::AppendOnlyFormatter; #[test] fn parse_rdkafka_props() { @@ -599,12 +585,24 @@ mod test { "properties.retry.backoff.ms".to_string() => "114514".to_string(), 
"properties.batch.num.messages".to_string() => "114514".to_string(), "properties.batch.size".to_string() => "114514".to_string(), + "properties.compression.codec".to_string() => "zstd".to_string(), + "properties.message.timeout.ms".to_string() => "114514".to_string(), + "properties.max.in.flight.requests.per.connection".to_string() => "114514".to_string(), }; let c = KafkaConfig::from_hashmap(props).unwrap(); assert_eq!( c.rdkafka_properties.queue_buffering_max_ms, Some(114.514f64) ); + assert_eq!( + c.rdkafka_properties.compression_codec, + Some(CompressionCodec::Zstd) + ); + assert_eq!(c.rdkafka_properties.message_timeout_ms, 114514); + assert_eq!( + c.rdkafka_properties.max_in_flight_requests_per_connection, + 114514 + ); let props: HashMap = hashmap! { // basic @@ -626,6 +624,16 @@ mod test { "properties.queue.buffering.max.kbytes".to_string() => "-114514".to_string(), // usize cannot be negative }; assert!(KafkaConfig::from_hashmap(props).is_err()); + + let props: HashMap = hashmap! 
{ + // basic + "connector".to_string() => "kafka".to_string(), + "properties.bootstrap.server".to_string() => "localhost:9092".to_string(), + "topic".to_string() => "test".to_string(), + "type".to_string() => "append-only".to_string(), + "properties.compression.codec".to_string() => "notvalid".to_string(), // has to be a valid CompressionCodec + }; + assert!(KafkaConfig::from_hashmap(props).is_err()); } #[test] @@ -636,22 +644,16 @@ mod test { "topic".to_string() => "test".to_string(), "type".to_string() => "append-only".to_string(), "force_append_only".to_string() => "true".to_string(), - "use_transaction".to_string() => "False".to_string(), "properties.security.protocol".to_string() => "SASL".to_string(), "properties.sasl.mechanism".to_string() => "SASL".to_string(), "properties.sasl.username".to_string() => "test".to_string(), "properties.sasl.password".to_string() => "test".to_string(), - "properties.timeout".to_string() => "10s".to_string(), "properties.retry.max".to_string() => "20".to_string(), "properties.retry.interval".to_string() => "500ms".to_string(), }; let config = KafkaConfig::from_hashmap(properties).unwrap(); assert_eq!(config.common.brokers, "localhost:9092"); assert_eq!(config.common.topic, "test"); - assert_eq!(config.r#type, "append-only"); - assert!(config.force_append_only); - assert!(!config.use_transaction); - assert_eq!(config.timeout, Duration::from_secs(10)); assert_eq!(config.max_retry_num, 20); assert_eq!(config.retry_interval, Duration::from_millis(500)); @@ -663,9 +665,6 @@ mod test { "type".to_string() => "upsert".to_string(), }; let config = KafkaConfig::from_hashmap(properties).unwrap(); - assert!(!config.force_append_only); - assert!(!config.use_transaction); - assert_eq!(config.timeout, Duration::from_secs(5)); assert_eq!(config.max_retry_num, 3); assert_eq!(config.retry_interval, Duration::from_millis(100)); @@ -679,16 +678,6 @@ mod test { }; assert!(KafkaConfig::from_hashmap(properties).is_err()); - // Invalid bool input. 
- let properties: HashMap = hashmap! { - "connector".to_string() => "kafka".to_string(), - "properties.bootstrap.server".to_string() => "localhost:9092".to_string(), - "topic".to_string() => "test".to_string(), - "type".to_string() => "upsert".to_string(), - "force_append_only".to_string() => "yes".to_string(), // error! - }; - assert!(KafkaConfig::from_hashmap(properties).is_err()); - // Invalid duration input. let properties: HashMap = hashmap! { "connector".to_string() => "kafka".to_string(), @@ -701,7 +690,7 @@ mod test { } /// Note: Please enable the kafka by running `./risedev configure` before commenting #[ignore] - /// to run the test + /// to run the test, also remember to modify `risedev.yml` #[ignore] #[tokio::test] async fn test_kafka_producer() -> Result<()> { @@ -711,6 +700,7 @@ mod test { "properties.bootstrap.server".to_string() => "localhost:29092".to_string(), "type".to_string() => "append-only".to_string(), "topic".to_string() => "test_topic".to_string(), + "properties.compression.codec".to_string() => "zstd".to_string(), }; // Create a table with two columns (| id : INT32 | v2 : VARCHAR |) here @@ -729,29 +719,33 @@ mod test { }, ]); - // We do not specify primary key for this schema - let pk_indices = vec![]; let kafka_config = KafkaConfig::from_hashmap(properties)?; // Create the actual sink writer to Kafka - let mut sink = KafkaSinkWriter::new( + let sink = KafkaSinkWriter::new( kafka_config.clone(), - schema, - pk_indices, - true, - "test_sink_1".to_string(), - "test_db".into(), - "test_table".into(), + SinkFormatterImpl::AppendOnlyJson(AppendOnlyFormatter::new( + // We do not specify primary key for this schema + None, + JsonEncoder::new(schema, None, TimestampHandlingMode::Milli), + )), ) .await .unwrap(); + use crate::sink::log_store::DeliveryFutureManager; + + let mut future_manager = DeliveryFutureManager::new(usize::MAX); + for i in 0..10 { - let mut fail_flag = false; - sink.begin_epoch(i).await?; println!("epoch: {}", i); for j in 
0..100 { - match sink + let mut writer = KafkaPayloadWriter { + inner: &sink.inner, + add_future: future_manager.start_write_chunk(i, j), + config: &sink.config, + }; + match writer .send_result( FutureRecord::to(kafka_config.common.topic.as_str()) .payload(format!("value-{}", j).as_bytes()) @@ -761,83 +755,13 @@ mod test { { Ok(_) => {} Err(e) => { - fail_flag = true; println!("{:?}", e); - sink.abort().await?; + break; } }; } - if !fail_flag { - sink.commit().await?; - println!("commit success"); - } } Ok(()) } - - #[test] - fn test_chunk_to_json() -> Result<()> { - let chunk = StreamChunk::from_pretty( - " i f {i,f} - + 0 0.0 {0,0.0} - + 1 1.0 {1,1.0} - + 2 2.0 {2,2.0} - + 3 3.0 {3,3.0} - + 4 4.0 {4,4.0} - + 5 5.0 {5,5.0} - + 6 6.0 {6,6.0} - + 7 7.0 {7,7.0} - + 8 8.0 {8,8.0} - + 9 9.0 {9,9.0}", - ); - - let schema = Schema::new(vec![ - Field { - data_type: DataType::Int32, - name: "v1".into(), - sub_fields: vec![], - type_name: "".into(), - }, - Field { - data_type: DataType::Float32, - name: "v2".into(), - sub_fields: vec![], - type_name: "".into(), - }, - Field { - data_type: DataType::new_struct( - vec![DataType::Int32, DataType::Float32], - vec!["v4".to_string(), "v5".to_string()], - ), - name: "v3".into(), - sub_fields: vec![ - Field { - data_type: DataType::Int32, - name: "v4".into(), - sub_fields: vec![], - type_name: "".into(), - }, - Field { - data_type: DataType::Float32, - name: "v5".into(), - sub_fields: vec![], - type_name: "".into(), - }, - ], - type_name: "".into(), - }, - ]); - - let json_chunk = chunk_to_json(chunk, &schema).unwrap(); - let schema_json = schema_to_json(&schema, "test_db", "test_table"); - assert_eq!(schema_json, 
serde_json::from_str::("{\"fields\":[{\"field\":\"before\",\"fields\":[{\"field\":\"v1\",\"optional\":true,\"type\":\"int32\"},{\"field\":\"v2\",\"optional\":true,\"type\":\"float\"},{\"field\":\"v3\",\"optional\":true,\"type\":\"string\"}],\"name\":\"RisingWave.test_db.test_table.Key\",\"optional\":true,\"type\":\"struct\"},{\"field\":\"after\",\"fields\":[{\"field\":\"v1\",\"optional\":true,\"type\":\"int32\"},{\"field\":\"v2\",\"optional\":true,\"type\":\"float\"},{\"field\":\"v3\",\"optional\":true,\"type\":\"string\"}],\"name\":\"RisingWave.test_db.test_table.Key\",\"optional\":true,\"type\":\"struct\"},{\"field\":\"source\",\"fields\":[{\"field\":\"db\",\"optional\":false,\"type\":\"string\"},{\"field\":\"table\",\"optional\":true,\"type\":\"string\"}],\"name\":\"RisingWave.test_db.test_table.Source\",\"optional\":false,\"type\":\"struct\"},{\"field\":\"op\",\"optional\":false,\"type\":\"string\"},{\"field\":\"ts_ms\",\"optional\":false,\"type\":\"int64\"}],\"name\":\"RisingWave.test_db.test_table.Envelope\",\"optional\":false,\"type\":\"struct\"}").unwrap()); - assert_eq!( - serde_json::from_str::(&json_chunk[0]).unwrap(), - serde_json::from_str::("{\"v1\":0,\"v2\":0.0,\"v3\":{\"v4\":0,\"v5\":0.0}}") - .unwrap() - ); - - Ok(()) - } } diff --git a/src/connector/src/sink/kinesis.rs b/src/connector/src/sink/kinesis.rs index 4d76a3235e381..605edde3b1eb0 100644 --- a/src/connector/src/sink/kinesis.rs +++ b/src/connector/src/sink/kinesis.rs @@ -13,32 +13,30 @@ // limitations under the License. 
use std::collections::HashMap; -use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::anyhow; use aws_sdk_kinesis::error::DisplayErrorContext; use aws_sdk_kinesis::operation::put_record::PutRecordOutput; use aws_sdk_kinesis::primitives::Blob; use aws_sdk_kinesis::Client as KinesisClient; -use futures_async_stream::for_await; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; -use risingwave_rpc_client::ConnectorClient; use serde_derive::Deserialize; use serde_with::serde_as; use tokio_retry::strategy::{jitter, ExponentialBackoff}; use tokio_retry::Retry; +use super::catalog::SinkFormatDesc; use super::SinkParam; use crate::common::KinesisCommon; -use crate::sink::utils::{ - gen_append_only_message_stream, gen_debezium_message_stream, gen_upsert_message_stream, - AppendOnlyAdapterOpts, DebeziumAdapterOpts, UpsertAdapterOpts, -}; -use crate::sink::{ - DummySinkCommitCoordinator, Result, Sink, SinkError, SinkWriter, SinkWriterParam, - SINK_TYPE_APPEND_ONLY, SINK_TYPE_DEBEZIUM, SINK_TYPE_OPTION, SINK_TYPE_UPSERT, +use crate::dispatch_sink_formatter_impl; +use crate::sink::catalog::desc::SinkDesc; +use crate::sink::formatter::SinkFormatterImpl; +use crate::sink::log_store::DeliveryFutureManagerAddFuture; +use crate::sink::writer::{ + AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt, FormattedSink, }; +use crate::sink::{DummySinkCommitCoordinator, Result, Sink, SinkError, SinkWriterParam}; pub const KINESIS_SINK: &str = "kinesis"; @@ -47,37 +45,57 @@ pub struct KinesisSink { pub config: KinesisSinkConfig, schema: Schema, pk_indices: Vec, - is_append_only: bool, + format_desc: SinkFormatDesc, db_name: String, sink_from_name: String, } -impl KinesisSink { - pub fn new(config: KinesisSinkConfig, param: SinkParam) -> Self { - Self { +impl TryFrom for KinesisSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + let schema = param.schema(); + let config = 
KinesisSinkConfig::from_hashmap(param.properties)?; + Ok(Self { config, - schema: param.schema(), - pk_indices: param.pk_indices, - is_append_only: param.sink_type.is_append_only(), + schema, + pk_indices: param.downstream_pk, + format_desc: param + .format_desc + .ok_or_else(|| SinkError::Config(anyhow!("missing FORMAT ... ENCODE ...")))?, db_name: param.db_name, sink_from_name: param.sink_from_name, - } + }) } } -#[async_trait::async_trait] impl Sink for KinesisSink { type Coordinator = DummySinkCommitCoordinator; - type Writer = KinesisSinkWriter; + type LogSinker = AsyncTruncateLogSinkerOf; + + const SINK_NAME: &'static str = KINESIS_SINK; - async fn validate(&self, _client: Option) -> Result<()> { - // For upsert Kafka sink, the primary key must be defined. - if !self.is_append_only && self.pk_indices.is_empty() { + fn default_sink_decouple(desc: &SinkDesc) -> bool { + desc.sink_type.is_append_only() + } + + async fn validate(&self) -> Result<()> { + // Kinesis requires partition key. There is no builtin support for round-robin as in kafka/pulsar. + // https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html#Streams-PutRecord-request-PartitionKey + if self.pk_indices.is_empty() { return Err(SinkError::Config(anyhow!( - "primary key not defined for {} kafka sink (please define in `primary_key` field)", - self.config.r#type + "kinesis sink requires partition key (please define in `primary_key` field)", ))); } + // Check for formatter constructor error, before it is too late for error reporting. 
+ SinkFormatterImpl::new( + &self.format_desc, + self.schema.clone(), + self.pk_indices.clone(), + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await?; // check reachability let client = self.config.common.build_client().await?; @@ -93,16 +111,17 @@ impl Sink for KinesisSink { Ok(()) } - async fn new_writer(&self, _writer_env: SinkWriterParam) -> Result { - KinesisSinkWriter::new( + async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result { + Ok(KinesisSinkWriter::new( self.config.clone(), self.schema.clone(), self.pk_indices.clone(), - self.is_append_only, + &self.format_desc, self.db_name.clone(), self.sink_from_name.clone(), ) - .await + .await? + .into_log_sinker(usize::MAX)) } } @@ -111,8 +130,6 @@ impl Sink for KinesisSink { pub struct KinesisSinkConfig { #[serde(flatten)] pub common: KinesisCommon, - - pub r#type: String, // accept "append-only", "debezium", or "upsert" } impl KinesisSinkConfig { @@ -120,31 +137,19 @@ impl KinesisSinkConfig { let config = serde_json::from_value::(serde_json::to_value(properties).unwrap()) .map_err(|e| SinkError::Config(anyhow!(e)))?; - if config.r#type != SINK_TYPE_APPEND_ONLY - && config.r#type != SINK_TYPE_DEBEZIUM - && config.r#type != SINK_TYPE_UPSERT - { - return Err(SinkError::Config(anyhow!( - "`{}` must be {}, {}, or {}", - SINK_TYPE_OPTION, - SINK_TYPE_APPEND_ONLY, - SINK_TYPE_DEBEZIUM, - SINK_TYPE_UPSERT - ))); - } Ok(config) } } -#[derive(Debug)] pub struct KinesisSinkWriter { pub config: KinesisSinkConfig, - schema: Schema, - pk_indices: Vec, + formatter: SinkFormatterImpl, + payload_writer: KinesisSinkPayloadWriter, +} + +struct KinesisSinkPayloadWriter { client: KinesisClient, - is_append_only: bool, - db_name: String, - sink_from_name: String, + config: KinesisSinkConfig, } impl KinesisSinkWriter { @@ -152,27 +157,28 @@ impl KinesisSinkWriter { config: KinesisSinkConfig, schema: Schema, pk_indices: Vec, - is_append_only: bool, + format_desc: &SinkFormatDesc, db_name: String, 
sink_from_name: String, ) -> Result { + let formatter = + SinkFormatterImpl::new(format_desc, schema, pk_indices, db_name, sink_from_name) + .await?; let client = config .common .build_client() .await .map_err(SinkError::Kinesis)?; Ok(Self { - config, - schema, - pk_indices, - client, - is_append_only, - db_name, - sink_from_name, + config: config.clone(), + formatter, + payload_writer: KinesisSinkPayloadWriter { client, config }, }) } - - async fn put_record(&self, key: &str, payload: Blob) -> Result { +} +impl KinesisSinkPayloadWriter { + async fn put_record(&self, key: &str, payload: Vec) -> Result { + let payload = Blob::new(payload); // todo: switch to put_records() for batching Retry::spawn( ExponentialBackoff::from_millis(100).map(jitter).take(3), @@ -200,95 +206,32 @@ impl KinesisSinkWriter { )) }) } - - async fn upsert(&self, chunk: StreamChunk) -> Result<()> { - let upsert_stream = gen_upsert_message_stream( - &self.schema, - &self.pk_indices, - chunk, - UpsertAdapterOpts::default(), - ); - - crate::impl_load_stream_write_record!(upsert_stream, self.put_record); - Ok(()) - } - - async fn append_only(&self, chunk: StreamChunk) -> Result<()> { - let append_only_stream = gen_append_only_message_stream( - &self.schema, - &self.pk_indices, - chunk, - AppendOnlyAdapterOpts::default(), - ); - - crate::impl_load_stream_write_record!(append_only_stream, self.put_record); - Ok(()) - } - - async fn debezium_update(&self, chunk: StreamChunk, ts_ms: u64) -> Result<()> { - let dbz_stream = gen_debezium_message_stream( - &self.schema, - &self.pk_indices, - chunk, - ts_ms, - DebeziumAdapterOpts::default(), - &self.db_name, - &self.sink_from_name, - ); - - crate::impl_load_stream_write_record!(dbz_stream, self.put_record); - - Ok(()) - } } -#[async_trait::async_trait] -impl SinkWriter for KinesisSinkWriter { - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { - if self.is_append_only { - self.append_only(chunk).await - } else if self.config.r#type == 
SINK_TYPE_DEBEZIUM { - self.debezium_update( - chunk, - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - ) - .await - } else if self.config.r#type == SINK_TYPE_UPSERT { - self.upsert(chunk).await - } else { - unreachable!() - } - } - - async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { - // Kinesis offers no transactional guarantees, so we do nothing here. - Ok(()) - } +impl FormattedSink for KinesisSinkPayloadWriter { + type K = String; + type V = Vec; - async fn barrier(&mut self, _is_checkpoint: bool) -> Result<()> { - Ok(()) + async fn write_one(&mut self, k: Option, v: Option) -> Result<()> { + self.put_record( + &k.ok_or_else(|| SinkError::Kinesis(anyhow!("no key provided")))?, + v.unwrap_or_default(), + ) + .await + .map(|_| ()) } } -#[macro_export] -macro_rules! impl_load_stream_write_record { - ($stream:ident, $op_fn:stmt) => { - #[for_await] - for msg in $stream { - let (event_key_object, event_object) = msg?; - let key_str = event_key_object.unwrap().to_string(); - $op_fn( - &key_str, - Blob::new(if let Some(value) = event_object { - value.to_string().into_bytes() - } else { - vec![] - }), - ) - .await?; - } - }; +impl AsyncTruncateSinkWriter for KinesisSinkWriter { + async fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + _add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> Result<()> { + dispatch_sink_formatter_impl!( + &self.formatter, + formatter, + self.payload_writer.write_chunk(chunk, formatter).await + ) + } } diff --git a/src/connector/src/sink/log_store.rs b/src/connector/src/sink/log_store.rs new file mode 100644 index 0000000000000..f7d99141139f5 --- /dev/null +++ b/src/connector/src/sink/log_store.rs @@ -0,0 +1,751 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cmp::Ordering; +use std::collections::VecDeque; +use std::fmt::Debug; +use std::future::{poll_fn, Future}; +use std::sync::Arc; +use std::task::Poll; + +use anyhow::anyhow; +use futures::{TryFuture, TryFutureExt}; +use risingwave_common::array::StreamChunk; +use risingwave_common::buffer::Bitmap; +use risingwave_common::util::epoch::{EpochPair, INVALID_EPOCH}; + +use crate::sink::SinkMetrics; + +pub type LogStoreResult = Result; +pub type ChunkId = usize; + +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum TruncateOffset { + Chunk { epoch: u64, chunk_id: ChunkId }, + Barrier { epoch: u64 }, +} + +impl PartialOrd for TruncateOffset { + fn partial_cmp(&self, other: &Self) -> Option { + let extract = |offset: &TruncateOffset| match offset { + TruncateOffset::Chunk { epoch, chunk_id } => (*epoch, *chunk_id), + TruncateOffset::Barrier { epoch } => (*epoch, usize::MAX), + }; + let this = extract(self); + let other = extract(other); + this.partial_cmp(&other) + } +} + +impl TruncateOffset { + pub fn next_chunk_id(&self) -> ChunkId { + match self { + TruncateOffset::Chunk { chunk_id, .. } => chunk_id + 1, + TruncateOffset::Barrier { .. } => 0, + } + } + + pub fn epoch(&self) -> u64 { + match self { + TruncateOffset::Chunk { epoch, .. } | TruncateOffset::Barrier { epoch } => *epoch, + } + } + + pub fn check_next_item_epoch(&self, epoch: u64) -> LogStoreResult<()> { + match self { + TruncateOffset::Chunk { + epoch: offset_epoch, + .. 
+ } => { + if epoch != *offset_epoch { + return Err(anyhow!( + "new item epoch {} not match current chunk offset epoch {}", + epoch, + offset_epoch + )); + } + } + TruncateOffset::Barrier { + epoch: offset_epoch, + } => { + if epoch <= *offset_epoch { + return Err(anyhow!( + "new item epoch {} not exceed barrier offset epoch {}", + epoch, + offset_epoch + )); + } + } + } + Ok(()) + } +} + +#[derive(Debug)] +pub enum LogStoreReadItem { + StreamChunk { + chunk: StreamChunk, + chunk_id: ChunkId, + }, + Barrier { + is_checkpoint: bool, + }, + UpdateVnodeBitmap(Arc), +} + +pub trait LogWriter: Send { + /// Initialize the log writer with an epoch + fn init(&mut self, epoch: EpochPair) -> impl Future> + Send + '_; + + /// Write a stream chunk to the log writer + fn write_chunk( + &mut self, + chunk: StreamChunk, + ) -> impl Future> + Send + '_; + + /// Mark current epoch as finished and sealed, and flush the unconsumed log data. + fn flush_current_epoch( + &mut self, + next_epoch: u64, + is_checkpoint: bool, + ) -> impl Future> + Send + '_; + + /// Update the vnode bitmap of the log writer + fn update_vnode_bitmap( + &mut self, + new_vnodes: Arc, + ) -> impl Future> + Send + '_; +} + +pub trait LogReader: Send + Sized + 'static { + /// Initialize the log reader. Usually function as waiting for log writer to be initialized. + fn init(&mut self) -> impl Future> + Send + '_; + + /// Emit the next item. + /// + /// The implementation should ensure that the future is cancellation safe. + fn next_item( + &mut self, + ) -> impl Future> + Send + '_; + + /// Mark that all items emitted so far have been consumed and it is safe to truncate the log + /// from the current offset. 
+ fn truncate( + &mut self, + offset: TruncateOffset, + ) -> impl Future> + Send + '_; +} + +pub trait LogStoreFactory: 'static { + type Reader: LogReader + Send + 'static; + type Writer: LogWriter + Send + 'static; + + fn build(self) -> impl Future + Send; +} + +pub struct TransformChunkLogReader StreamChunk, R: LogReader> { + f: F, + inner: R, +} + +impl StreamChunk + Send + 'static, R: LogReader> LogReader + for TransformChunkLogReader +{ + fn init(&mut self) -> impl Future> + Send + '_ { + self.inner.init() + } + + async fn next_item(&mut self) -> LogStoreResult<(u64, LogStoreReadItem)> { + let (epoch, item) = self.inner.next_item().await?; + let item = match item { + LogStoreReadItem::StreamChunk { chunk, chunk_id } => LogStoreReadItem::StreamChunk { + chunk: (self.f)(chunk), + chunk_id, + }, + other => other, + }; + Ok((epoch, item)) + } + + fn truncate( + &mut self, + offset: TruncateOffset, + ) -> impl Future> + Send + '_ { + self.inner.truncate(offset) + } +} + +pub struct MonitoredLogReader { + inner: R, + read_epoch: u64, + metrics: SinkMetrics, +} + +impl LogReader for MonitoredLogReader { + async fn init(&mut self) -> LogStoreResult<()> { + self.inner.init().await + } + + async fn next_item(&mut self) -> LogStoreResult<(u64, LogStoreReadItem)> { + self.inner.next_item().await.inspect(|(epoch, item)| { + if self.read_epoch != *epoch { + self.read_epoch = *epoch; + self.metrics.log_store_latest_read_epoch.set(*epoch as _); + } + if let LogStoreReadItem::StreamChunk { chunk, .. 
} = item { + self.metrics + .log_store_read_rows + .inc_by(chunk.cardinality() as _); + } + }) + } + + async fn truncate(&mut self, offset: TruncateOffset) -> LogStoreResult<()> { + self.inner.truncate(offset).await + } +} + +#[easy_ext::ext(LogReaderExt)] +impl T +where + T: LogReader, +{ + pub fn transform_chunk StreamChunk + Sized>( + self, + f: F, + ) -> TransformChunkLogReader { + TransformChunkLogReader { f, inner: self } + } + + pub fn monitored(self, metrics: SinkMetrics) -> MonitoredLogReader { + MonitoredLogReader { + read_epoch: INVALID_EPOCH, + inner: self, + metrics, + } + } +} + +pub struct MonitoredLogWriter { + inner: W, + metrics: SinkMetrics, +} + +impl LogWriter for MonitoredLogWriter { + async fn init(&mut self, epoch: EpochPair) -> LogStoreResult<()> { + self.metrics + .log_store_first_write_epoch + .set(epoch.curr as _); + self.metrics + .log_store_latest_write_epoch + .set(epoch.curr as _); + self.inner.init(epoch).await + } + + async fn write_chunk(&mut self, chunk: StreamChunk) -> LogStoreResult<()> { + self.metrics + .log_store_write_rows + .inc_by(chunk.cardinality() as _); + self.inner.write_chunk(chunk).await + } + + async fn flush_current_epoch( + &mut self, + next_epoch: u64, + is_checkpoint: bool, + ) -> LogStoreResult<()> { + self.inner + .flush_current_epoch(next_epoch, is_checkpoint) + .await?; + self.metrics + .log_store_latest_write_epoch + .set(next_epoch as _); + Ok(()) + } + + async fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> LogStoreResult<()> { + self.inner.update_vnode_bitmap(new_vnodes).await + } +} + +#[easy_ext::ext(LogWriterExt)] +impl T +where + T: LogWriter + Sized, +{ + pub fn monitored(self, metrics: SinkMetrics) -> MonitoredLogWriter { + MonitoredLogWriter { + inner: self, + metrics, + } + } +} + +enum DeliveryFutureManagerItem { + Chunk { + chunk_id: ChunkId, + // earlier future at the front + futures: VecDeque, + }, + Barrier, +} + +pub struct DeliveryFutureManager { + future_count: usize, + 
max_future_count: usize, + // earlier items at the front + items: VecDeque<(u64, DeliveryFutureManagerItem)>, +} + +impl DeliveryFutureManager { + pub fn new(max_future_count: usize) -> Self { + Self { + future_count: 0, + max_future_count, + items: Default::default(), + } + } + + pub fn add_barrier(&mut self, epoch: u64) { + if let Some((item_epoch, last_item)) = self.items.back() { + match last_item { + DeliveryFutureManagerItem::Chunk { .. } => { + assert_eq!(*item_epoch, epoch) + } + DeliveryFutureManagerItem::Barrier => { + assert!( + epoch > *item_epoch, + "new barrier epoch {} should be greater than prev barrier {}", + epoch, + item_epoch + ); + } + } + } + self.items + .push_back((epoch, DeliveryFutureManagerItem::Barrier)); + } + + pub fn start_write_chunk( + &mut self, + epoch: u64, + chunk_id: ChunkId, + ) -> DeliveryFutureManagerAddFuture<'_, F> { + if let Some((item_epoch, item)) = self.items.back() { + match item { + DeliveryFutureManagerItem::Chunk { + chunk_id: item_chunk_id, + .. + } => { + assert_eq!(epoch, *item_epoch); + assert!( + chunk_id > *item_chunk_id, + "new chunk id {} should be greater than prev chunk id {}", + chunk_id, + item_chunk_id + ); + } + DeliveryFutureManagerItem::Barrier => { + assert!( + epoch > *item_epoch, + "new chunk epoch {} should be greater than prev barrier: {}", + epoch, + item_epoch + ); + } + } + } + self.items.push_back(( + epoch, + DeliveryFutureManagerItem::Chunk { + chunk_id, + futures: VecDeque::new(), + }, + )); + DeliveryFutureManagerAddFuture(self) + } +} + +pub struct DeliveryFutureManagerAddFuture<'a, F>(&'a mut DeliveryFutureManager); + +impl<'a, F: TryFuture + Unpin + 'static> DeliveryFutureManagerAddFuture<'a, F> { + /// Add a new future to the latest started written chunk. + /// The returned bool value indicate whether we have awaited on any previous futures. 
+ pub async fn add_future_may_await(&mut self, future: F) -> Result { + let mut has_await = false; + while self.0.future_count >= self.0.max_future_count { + self.await_one_delivery().await?; + has_await = true; + } + match self.0.items.back_mut() { + Some((_, DeliveryFutureManagerItem::Chunk { futures, .. })) => { + futures.push_back(future); + self.0.future_count += 1; + Ok(has_await) + } + _ => unreachable!("should add future only after add a new chunk"), + } + } + + pub async fn await_one_delivery(&mut self) -> Result<(), F::Error> { + for (_, item) in &mut self.0.items { + if let DeliveryFutureManagerItem::Chunk {futures, ..} = item && let Some(mut delivery_future) = futures.pop_front() { + self.0.future_count -= 1; + return poll_fn(|cx| delivery_future.try_poll_unpin(cx)).await; + } else { + continue; + } + } + Ok(()) + } + + pub fn future_count(&self) -> usize { + self.0.future_count + } + + pub fn max_future_count(&self) -> usize { + self.0.max_future_count + } +} + +impl + Unpin + 'static> DeliveryFutureManager { + pub fn next_truncate_offset( + &mut self, + ) -> impl Future> + '_ { + poll_fn(move |cx| { + let mut latest_offset: Option = None; + 'outer: while let Some((epoch, item)) = self.items.front_mut() { + match item { + DeliveryFutureManagerItem::Chunk { chunk_id, futures } => { + while let Some(future) = futures.front_mut() { + match future.try_poll_unpin(cx) { + Poll::Ready(result) => match result { + Ok(()) => { + self.future_count -= 1; + futures.pop_front(); + } + Err(e) => { + return Poll::Ready(Err(e)); + } + }, + Poll::Pending => { + break 'outer; + } + } + } + + // when we reach here, there must not be any pending or error future. 
+ // Which means all futures of this stream chunk have been finished + assert!(futures.is_empty()); + latest_offset = Some(TruncateOffset::Chunk { + epoch: *epoch, + chunk_id: *chunk_id, + }); + self.items.pop_front().expect("items not empty"); + } + DeliveryFutureManagerItem::Barrier => { + latest_offset = Some(TruncateOffset::Barrier { epoch: *epoch }); + self.items.pop_front().expect("items not empty"); + // Barrier will be yielded anyway + break 'outer; + } + } + } + if let Some(offset) = latest_offset { + Poll::Ready(Ok(offset)) + } else { + Poll::Pending + } + }) + } +} + +#[cfg(test)] +mod tests { + use std::future::{poll_fn, Future}; + use std::pin::pin; + use std::task::Poll; + + use futures::{FutureExt, TryFuture}; + use tokio::sync::oneshot; + use tokio::sync::oneshot::Receiver; + + use crate::sink::log_store::{DeliveryFutureManager, TruncateOffset}; + + #[test] + fn test_truncate_offset_cmp() { + assert!( + TruncateOffset::Barrier { epoch: 232 } + < TruncateOffset::Chunk { + epoch: 233, + chunk_id: 1 + } + ); + assert_eq!( + TruncateOffset::Chunk { + epoch: 1, + chunk_id: 1 + }, + TruncateOffset::Chunk { + epoch: 1, + chunk_id: 1 + } + ); + assert!( + TruncateOffset::Chunk { + epoch: 1, + chunk_id: 1 + } < TruncateOffset::Chunk { + epoch: 1, + chunk_id: 2 + } + ); + assert!( + TruncateOffset::Barrier { epoch: 1 } + > TruncateOffset::Chunk { + epoch: 1, + chunk_id: 2 + } + ); + assert!( + TruncateOffset::Chunk { + epoch: 1, + chunk_id: 2 + } < TruncateOffset::Barrier { epoch: 1 } + ); + assert!( + TruncateOffset::Chunk { + epoch: 2, + chunk_id: 2 + } > TruncateOffset::Barrier { epoch: 1 } + ); + assert!(TruncateOffset::Barrier { epoch: 2 } > TruncateOffset::Barrier { epoch: 1 }); + } + + type TestFuture = impl TryFuture + Unpin + 'static; + fn to_test_future(rx: Receiver>) -> TestFuture { + async move { rx.await.unwrap() }.boxed() + } + + #[tokio::test] + async fn test_empty() { + let mut manager = DeliveryFutureManager::::new(2); + let mut future = 
pin!(manager.next_truncate_offset()); + assert!(poll_fn(|cx| Poll::Ready(future.as_mut().poll(cx))) + .await + .is_pending()); + } + + #[tokio::test] + async fn test_future_delivery_manager_basic() { + let mut manager = DeliveryFutureManager::new(2); + let epoch1 = 233; + let chunk_id1 = 1; + let (tx1_1, rx1_1) = oneshot::channel(); + let mut write_chunk = manager.start_write_chunk(epoch1, chunk_id1); + assert!(!write_chunk + .add_future_may_await(to_test_future(rx1_1)) + .await + .unwrap()); + assert_eq!(manager.future_count, 1); + { + let mut next_truncate_offset = pin!(manager.next_truncate_offset()); + assert!( + poll_fn(|cx| Poll::Ready(next_truncate_offset.as_mut().poll(cx))) + .await + .is_pending() + ); + tx1_1.send(Ok(())).unwrap(); + assert_eq!( + next_truncate_offset.await.unwrap(), + TruncateOffset::Chunk { + epoch: epoch1, + chunk_id: chunk_id1 + } + ); + } + assert_eq!(manager.future_count, 0); + manager.add_barrier(epoch1); + assert_eq!( + manager.next_truncate_offset().await.unwrap(), + TruncateOffset::Barrier { epoch: epoch1 } + ); + } + + #[tokio::test] + async fn test_future_delivery_manager_compress_chunk() { + let mut manager = DeliveryFutureManager::new(10); + let epoch1 = 233; + let chunk_id1 = 1; + let chunk_id2 = chunk_id1 + 1; + let chunk_id3 = chunk_id2 + 1; + let (tx1_1, rx1_1) = oneshot::channel(); + let (tx1_2, rx1_2) = oneshot::channel(); + let (tx1_3, rx1_3) = oneshot::channel(); + let epoch2 = epoch1 + 1; + let (tx2_1, rx2_1) = oneshot::channel(); + assert!(!manager + .start_write_chunk(epoch1, chunk_id1) + .add_future_may_await(to_test_future(rx1_1)) + .await + .unwrap()); + assert!(!manager + .start_write_chunk(epoch1, chunk_id2) + .add_future_may_await(to_test_future(rx1_2)) + .await + .unwrap()); + assert!(!manager + .start_write_chunk(epoch1, chunk_id3) + .add_future_may_await(to_test_future(rx1_3)) + .await + .unwrap()); + manager.add_barrier(epoch1); + assert!(!manager + .start_write_chunk(epoch2, chunk_id1) + 
.add_future_may_await(to_test_future(rx2_1)) + .await + .unwrap()); + assert_eq!(manager.future_count, 4); + { + let mut next_truncate_offset = pin!(manager.next_truncate_offset()); + assert!( + poll_fn(|cx| Poll::Ready(next_truncate_offset.as_mut().poll(cx))) + .await + .is_pending() + ); + tx1_2.send(Ok(())).unwrap(); + assert!( + poll_fn(|cx| Poll::Ready(next_truncate_offset.as_mut().poll(cx))) + .await + .is_pending() + ); + tx1_1.send(Ok(())).unwrap(); + // The offset of chunk1 and chunk2 are compressed + assert_eq!( + next_truncate_offset.await.unwrap(), + TruncateOffset::Chunk { + epoch: epoch1, + chunk_id: chunk_id2 + } + ); + } + assert_eq!(manager.future_count, 2); + { + let mut next_truncate_offset = pin!(manager.next_truncate_offset()); + assert!( + poll_fn(|cx| Poll::Ready(next_truncate_offset.as_mut().poll(cx))) + .await + .is_pending() + ); + tx1_3.send(Ok(())).unwrap(); + tx2_1.send(Ok(())).unwrap(); + // Emit barrier though later chunk has finished. + assert_eq!( + next_truncate_offset.await.unwrap(), + TruncateOffset::Barrier { epoch: epoch1 } + ); + } + assert_eq!(manager.future_count, 1); + assert_eq!( + manager.next_truncate_offset().await.unwrap(), + TruncateOffset::Chunk { + epoch: epoch2, + chunk_id: chunk_id1 + } + ); + } + + #[tokio::test] + async fn test_future_delivery_manager_await_future() { + let mut manager = DeliveryFutureManager::new(2); + let epoch = 233; + let chunk_id1 = 1; + let chunk_id2 = chunk_id1 + 1; + let (tx1_1, rx1_1) = oneshot::channel(); + let (tx1_2, rx1_2) = oneshot::channel(); + let (tx2_1, rx2_1) = oneshot::channel(); + let (tx2_2, rx2_2) = oneshot::channel(); + + { + let mut write_chunk = manager.start_write_chunk(epoch, chunk_id1); + assert!(!write_chunk + .add_future_may_await(to_test_future(rx1_1)) + .await + .unwrap()); + assert!(!write_chunk + .add_future_may_await(to_test_future(rx1_2)) + .await + .unwrap()); + assert_eq!(manager.future_count, 2); + } + + { + let mut write_chunk = 
manager.start_write_chunk(epoch, chunk_id2); + { + let mut future1 = pin!(write_chunk.add_future_may_await(to_test_future(rx2_1))); + assert!(poll_fn(|cx| Poll::Ready(future1.as_mut().poll(cx))) + .await + .is_pending()); + tx1_1.send(Ok(())).unwrap(); + assert!(future1.await.unwrap()); + } + assert_eq!(2, write_chunk.future_count()); + { + let mut future2 = pin!(write_chunk.add_future_may_await(to_test_future(rx2_2))); + assert!(poll_fn(|cx| Poll::Ready(future2.as_mut().poll(cx))) + .await + .is_pending()); + tx1_2.send(Ok(())).unwrap(); + assert!(future2.await.unwrap()); + } + assert_eq!(2, write_chunk.future_count()); + { + let mut future3 = pin!(write_chunk.await_one_delivery()); + assert!(poll_fn(|cx| Poll::Ready(future3.as_mut().poll(cx))) + .await + .is_pending()); + tx2_1.send(Ok(())).unwrap(); + future3.await.unwrap(); + } + assert_eq!(1, write_chunk.future_count()); + } + + assert_eq!( + manager.next_truncate_offset().await.unwrap(), + TruncateOffset::Chunk { + epoch, + chunk_id: chunk_id1 + } + ); + + assert_eq!(1, manager.future_count); + + { + let mut future = pin!(manager.next_truncate_offset()); + assert!(poll_fn(|cx| Poll::Ready(future.as_mut().poll(cx))) + .await + .is_pending()); + tx2_2.send(Ok(())).unwrap(); + assert_eq!( + future.await.unwrap(), + TruncateOffset::Chunk { + epoch, + chunk_id: chunk_id2 + } + ); + } + + assert_eq!(0, manager.future_count); + } +} diff --git a/src/connector/src/sink/mod.rs b/src/connector/src/sink/mod.rs index fd1de194671b3..6afd08778cd96 100644 --- a/src/connector/src/sink/mod.rs +++ b/src/connector/src/sink/mod.rs @@ -12,55 +12,117 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+pub mod blackhole; pub mod boxed; pub mod catalog; pub mod clickhouse; pub mod coordinate; +pub mod doris; +pub mod doris_connector; +pub mod encoder; +pub mod formatter; pub mod iceberg; pub mod kafka; pub mod kinesis; +pub mod log_store; pub mod nats; +pub mod pulsar; pub mod redis; pub mod remote; -#[cfg(any(test, madsim))] pub mod test_sink; pub mod utils; +pub mod writer; use std::collections::HashMap; -use std::sync::Arc; use ::clickhouse::error::Error as ClickHouseError; +use ::redis::RedisError; use anyhow::anyhow; use async_trait::async_trait; -use enum_as_inner::EnumAsInner; -use risingwave_common::array::StreamChunk; use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::{ColumnDesc, Field, Schema}; use risingwave_common::error::{anyhow_error, ErrorCode, RwError}; +use risingwave_common::metrics::{ + LabelGuardedHistogram, LabelGuardedIntCounter, LabelGuardedIntGauge, +}; use risingwave_pb::catalog::PbSinkType; use risingwave_pb::connector_service::{PbSinkParam, SinkMetadata, TableSchema}; use risingwave_rpc_client::error::RpcError; -use risingwave_rpc_client::{ConnectorClient, MetaClient}; +use risingwave_rpc_client::MetaClient; use thiserror::Error; pub use tracing; -use self::catalog::SinkType; -use self::clickhouse::{ClickHouseConfig, ClickHouseSink}; -use self::iceberg::{IcebergSink, ICEBERG_SINK, REMOTE_ICEBERG_SINK}; -use crate::sink::boxed::BoxSink; +use self::catalog::{SinkFormatDesc, SinkType}; +use crate::sink::catalog::desc::SinkDesc; use crate::sink::catalog::{SinkCatalog, SinkId}; -use crate::sink::clickhouse::CLICKHOUSE_SINK; -use crate::sink::iceberg::{IcebergConfig, RemoteIcebergConfig, RemoteIcebergSink}; -use crate::sink::kafka::{KafkaConfig, KafkaSink, KAFKA_SINK}; -use crate::sink::kinesis::{KinesisSink, KinesisSinkConfig, KINESIS_SINK}; -use crate::sink::nats::{NatsConfig, NatsSink, NATS_SINK}; -use crate::sink::redis::{RedisConfig, RedisSink}; -use crate::sink::remote::{CoordinatedRemoteSink, RemoteConfig, 
RemoteSink}; -#[cfg(any(test, madsim))] -use crate::sink::test_sink::{build_test_sink, TEST_SINK_NAME}; +use crate::sink::log_store::LogReader; +use crate::sink::writer::SinkWriter; use crate::ConnectorParams; -pub const DOWNSTREAM_SINK_KEY: &str = "connector"; +#[macro_export] +macro_rules! for_all_sinks { + ($macro:path $(, $arg:tt)*) => { + $macro! { + { + { Redis, $crate::sink::redis::RedisSink }, + { Kafka, $crate::sink::kafka::KafkaSink }, + { Pulsar, $crate::sink::pulsar::PulsarSink }, + { BlackHole, $crate::sink::blackhole::BlackHoleSink }, + { Kinesis, $crate::sink::kinesis::KinesisSink }, + { ClickHouse, $crate::sink::clickhouse::ClickHouseSink }, + { Iceberg, $crate::sink::iceberg::IcebergSink }, + { Nats, $crate::sink::nats::NatsSink }, + { RemoteIceberg, $crate::sink::iceberg::RemoteIcebergSink }, + { Jdbc, $crate::sink::remote::JdbcSink }, + { DeltaLake, $crate::sink::remote::DeltaLakeSink }, + { ElasticSearch, $crate::sink::remote::ElasticSearchSink }, + { Cassandra, $crate::sink::remote::CassandraSink }, + { Doris, $crate::sink::doris::DorisSink }, + { Test, $crate::sink::test_sink::TestSink } + } + $(,$arg)* + } + }; +} + +#[macro_export] +macro_rules! dispatch_sink { + ({$({$variant_name:ident, $sink_type:ty}),*}, $impl:tt, $sink:tt, $body:tt) => {{ + use $crate::sink::SinkImpl; + + match $impl { + $( + SinkImpl::$variant_name($sink) => $body, + )* + } + }}; + ($impl:expr, $sink:ident, $body:expr) => {{ + $crate::for_all_sinks! {$crate::dispatch_sink, {$impl}, $sink, {$body}} + }}; +} + +#[macro_export] +macro_rules! match_sink_name_str { + ({$({$variant_name:ident, $sink_type:ty}),*}, $name_str:tt, $type_name:ident, $body:tt, $on_other_closure:tt) => {{ + use $crate::sink::Sink; + match $name_str { + $( + <$sink_type>::SINK_NAME => { + type $type_name = $sink_type; + { + $body + } + }, + )* + other => ($on_other_closure)(other), + } + }}; + ($name_str:expr, $type_name:ident, $body:expr, $on_other_closure:expr) => {{ + $crate::for_all_sinks! 
{$crate::match_sink_name_str, {$name_str}, $type_name, {$body}, {$on_other_closure}} + }}; +} + +pub const CONNECTOR_TYPE_KEY: &str = "connector"; pub const SINK_TYPE_OPTION: &str = "type"; pub const SINK_TYPE_APPEND_ONLY: &str = "append-only"; pub const SINK_TYPE_DEBEZIUM: &str = "debezium"; @@ -72,8 +134,9 @@ pub struct SinkParam { pub sink_id: SinkId, pub properties: HashMap, pub columns: Vec, - pub pk_indices: Vec, + pub downstream_pk: Vec, pub sink_type: SinkType, + pub format_desc: Option, pub db_name: String, pub sink_from_name: String, } @@ -81,18 +144,30 @@ pub struct SinkParam { impl SinkParam { pub fn from_proto(pb_param: PbSinkParam) -> Self { let table_schema = pb_param.table_schema.expect("should contain table schema"); + let format_desc = match pb_param.format_desc { + Some(f) => f.try_into().ok(), + None => { + let connector = pb_param.properties.get(CONNECTOR_TYPE_KEY); + let r#type = pb_param.properties.get(SINK_TYPE_OPTION); + match (connector, r#type) { + (Some(c), Some(t)) => SinkFormatDesc::from_legacy_type(c, t).ok().flatten(), + _ => None, + } + } + }; Self { sink_id: SinkId::from(pb_param.sink_id), properties: pb_param.properties, columns: table_schema.columns.iter().map(ColumnDesc::from).collect(), - pk_indices: table_schema + downstream_pk: table_schema .pk_indices .iter() .map(|i| *i as usize) .collect(), sink_type: SinkType::from_proto( - PbSinkType::from_i32(pb_param.sink_type).expect("should be able to convert"), + PbSinkType::try_from(pb_param.sink_type).expect("should be able to convert"), ), + format_desc, db_name: pb_param.db_name, sink_from_name: pb_param.sink_from_name, } @@ -104,9 +179,10 @@ impl SinkParam { properties: self.properties.clone(), table_schema: Some(TableSchema { columns: self.columns.iter().map(|col| col.to_protobuf()).collect(), - pk_indices: self.pk_indices.iter().map(|i| *i as u32).collect(), + pk_indices: self.downstream_pk.iter().map(|i| *i as u32).collect(), }), sink_type: self.sink_type.to_proto().into(), 
+ format_desc: self.format_desc.as_ref().map(|f| f.to_proto()), db_name: self.db_name.clone(), sink_from_name: self.sink_from_name.clone(), } @@ -129,122 +205,81 @@ impl From for SinkParam { sink_id: sink_catalog.id, properties: sink_catalog.properties, columns, - pk_indices: sink_catalog.downstream_pk, + downstream_pk: sink_catalog.downstream_pk, sink_type: sink_catalog.sink_type, + format_desc: sink_catalog.format_desc, db_name: sink_catalog.db_name, sink_from_name: sink_catalog.sink_from_name, } } } -#[derive(Clone, Default)] +#[derive(Clone)] +pub struct SinkMetrics { + pub sink_commit_duration_metrics: LabelGuardedHistogram<3>, + pub connector_sink_rows_received: LabelGuardedIntCounter<2>, + pub log_store_first_write_epoch: LabelGuardedIntGauge<3>, + pub log_store_latest_write_epoch: LabelGuardedIntGauge<3>, + pub log_store_write_rows: LabelGuardedIntCounter<3>, + pub log_store_latest_read_epoch: LabelGuardedIntGauge<3>, + pub log_store_read_rows: LabelGuardedIntCounter<3>, +} + +impl SinkMetrics { + fn for_test() -> Self { + SinkMetrics { + sink_commit_duration_metrics: LabelGuardedHistogram::test_histogram(), + connector_sink_rows_received: LabelGuardedIntCounter::test_int_counter(), + log_store_first_write_epoch: LabelGuardedIntGauge::test_int_gauge(), + log_store_latest_write_epoch: LabelGuardedIntGauge::test_int_gauge(), + log_store_latest_read_epoch: LabelGuardedIntGauge::test_int_gauge(), + log_store_write_rows: LabelGuardedIntCounter::test_int_counter(), + log_store_read_rows: LabelGuardedIntCounter::test_int_counter(), + } + } +} + +#[derive(Clone)] pub struct SinkWriterParam { pub connector_params: ConnectorParams, pub executor_id: u64, pub vnode_bitmap: Option, pub meta_client: Option, + pub sink_metrics: SinkMetrics, } -#[async_trait] -pub trait Sink { - type Writer: SinkWriter; - type Coordinator: SinkCommitCoordinator; - - async fn validate(&self, client: Option) -> Result<()>; - async fn new_writer(&self, writer_param: SinkWriterParam) -> 
Result; - async fn new_coordinator( - &self, - _connector_client: Option, - ) -> Result { - Err(SinkError::Coordinator(anyhow!("no coordinator"))) +impl SinkWriterParam { + pub fn for_test() -> Self { + SinkWriterParam { + connector_params: Default::default(), + executor_id: Default::default(), + vnode_bitmap: Default::default(), + meta_client: Default::default(), + sink_metrics: SinkMetrics::for_test(), + } } } -#[async_trait] -pub trait SinkWriter: Send + 'static { - type CommitMetadata: Send = (); - /// Begin a new epoch - async fn begin_epoch(&mut self, epoch: u64) -> Result<()>; - - /// Write a stream chunk to sink - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()>; - - /// Receive a barrier and mark the end of current epoch. When `is_checkpoint` is true, the sink - /// writer should commit the current epoch. - async fn barrier(&mut self, is_checkpoint: bool) -> Result; +pub trait Sink: TryFrom { + const SINK_NAME: &'static str; + type LogSinker: LogSinker; + type Coordinator: SinkCommitCoordinator; - /// Clean up - async fn abort(&mut self) -> Result<()> { - Ok(()) + fn default_sink_decouple(_desc: &SinkDesc) -> bool { + false } - /// Update the vnode bitmap of current sink writer - async fn update_vnode_bitmap(&mut self, _vnode_bitmap: Arc) -> Result<()> { - Ok(()) - } -} - -#[async_trait] -// An old version of SinkWriter for backward compatibility -pub trait SinkWriterV1: Send + 'static { - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()>; - - // the following interface is for transactions, if not supported, return Ok(()) - // start a transaction with epoch number. Note that epoch number should be increasing. - async fn begin_epoch(&mut self, epoch: u64) -> Result<()>; - - // commits the current transaction and marks all messages in the transaction success. - async fn commit(&mut self) -> Result<()>; - - // aborts the current transaction because some error happens. we should rollback to the last - // commit point. 
- async fn abort(&mut self) -> Result<()>; -} - -pub struct SinkWriterV1Adapter { - is_empty: bool, - epoch: u64, - inner: W, -} - -impl SinkWriterV1Adapter { - pub(crate) fn new(inner: W) -> Self { - Self { - inner, - is_empty: true, - epoch: u64::MIN, - } + async fn validate(&self) -> Result<()>; + async fn new_log_sinker(&self, writer_param: SinkWriterParam) -> Result; + #[expect(clippy::unused_async)] + async fn new_coordinator(&self) -> Result { + Err(SinkError::Coordinator(anyhow!("no coordinator"))) } } #[async_trait] -impl SinkWriter for SinkWriterV1Adapter { - async fn begin_epoch(&mut self, epoch: u64) -> Result<()> { - self.epoch = epoch; - Ok(()) - } - - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { - if self.is_empty { - self.is_empty = false; - self.inner.begin_epoch(self.epoch).await?; - } - self.inner.write_batch(chunk).await - } - - async fn barrier(&mut self, is_checkpoint: bool) -> Result<()> { - if is_checkpoint { - if !self.is_empty { - self.inner.commit().await? 
- } - self.is_empty = true; - } - Ok(()) - } - - async fn abort(&mut self) -> Result<()> { - self.inner.abort().await - } +pub trait LogSinker: 'static { + async fn consume_log_and_sink(self, log_reader: impl LogReader) -> Result<()>; } #[async_trait] @@ -271,179 +306,61 @@ impl SinkCommitCoordinator for DummySinkCommitCoordinator { } } -#[derive(Clone, Debug, EnumAsInner)] -pub enum SinkConfig { - Redis(RedisConfig), - Kafka(Box), - Remote(RemoteConfig), - Kinesis(Box), - Iceberg(IcebergConfig), - RemoteIceberg(RemoteIcebergConfig), - BlackHole, - ClickHouse(Box), - Nats(NatsConfig), - #[cfg(any(test, madsim))] - Test, -} - -pub const BLACKHOLE_SINK: &str = "blackhole"; - -#[derive(Debug)] -pub struct BlackHoleSink; - -#[async_trait] -impl Sink for BlackHoleSink { - type Coordinator = DummySinkCommitCoordinator; - type Writer = Self; - - async fn new_writer(&self, _writer_env: SinkWriterParam) -> Result { - Ok(Self) - } - - async fn validate(&self, _client: Option) -> Result<()> { - Ok(()) - } -} - -#[async_trait] -impl SinkWriter for BlackHoleSink { - async fn write_batch(&mut self, _chunk: StreamChunk) -> Result<()> { - Ok(()) - } - - async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { - Ok(()) - } - - async fn barrier(&mut self, _is_checkpoint: bool) -> Result<()> { - Ok(()) - } -} - -impl SinkConfig { - pub fn from_hashmap(mut properties: HashMap) -> Result { - const CONNECTOR_TYPE_KEY: &str = "connector"; +impl SinkImpl { + pub fn new(mut param: SinkParam) -> Result { const CONNECTION_NAME_KEY: &str = "connection.name"; const PRIVATE_LINK_TARGET_KEY: &str = "privatelink.targets"; // remove privatelink related properties if any - properties.remove(PRIVATE_LINK_TARGET_KEY); - properties.remove(CONNECTION_NAME_KEY); + param.properties.remove(PRIVATE_LINK_TARGET_KEY); + param.properties.remove(CONNECTION_NAME_KEY); - let sink_type = properties + let sink_type = param + .properties .get(CONNECTOR_TYPE_KEY) .ok_or_else(|| SinkError::Config(anyhow!("missing 
config: {}", CONNECTOR_TYPE_KEY)))?; - match sink_type.to_lowercase().as_str() { - KAFKA_SINK => Ok(SinkConfig::Kafka(Box::new(KafkaConfig::from_hashmap( - properties, - )?))), - KINESIS_SINK => Ok(SinkConfig::Kinesis(Box::new( - KinesisSinkConfig::from_hashmap(properties)?, - ))), - CLICKHOUSE_SINK => Ok(SinkConfig::ClickHouse(Box::new( - ClickHouseConfig::from_hashmap(properties)?, - ))), - BLACKHOLE_SINK => Ok(SinkConfig::BlackHole), - REMOTE_ICEBERG_SINK => Ok(SinkConfig::RemoteIceberg( - RemoteIcebergConfig::from_hashmap(properties)?, - )), - ICEBERG_SINK => Ok(SinkConfig::Iceberg(IcebergConfig::from_hashmap( - properties, - )?)), - NATS_SINK => Ok(SinkConfig::Nats(NatsConfig::from_hashmap(properties)?)), - // Only in test or deterministic test, test sink is enabled. - #[cfg(any(test, madsim))] - TEST_SINK_NAME => Ok(SinkConfig::Test), - _ => Ok(SinkConfig::Remote(RemoteConfig::from_hashmap(properties)?)), - } + match_sink_name_str!( + sink_type.to_lowercase().as_str(), + SinkType, + Ok(SinkType::try_from(param)?.into()), + |other| { + Err(SinkError::Config(anyhow!( + "unsupported sink connector {}", + other + ))) + } + ) } } pub fn build_sink(param: SinkParam) -> Result { - let config = SinkConfig::from_hashmap(param.properties.clone())?; - SinkImpl::new(config, param) -} - -#[derive(Debug)] -pub enum SinkImpl { - Redis(RedisSink), - Kafka(KafkaSink), - Remote(RemoteSink), - BlackHole(BlackHoleSink), - Kinesis(KinesisSink), - ClickHouse(ClickHouseSink), - Iceberg(IcebergSink), - Nats(NatsSink), - RemoteIceberg(RemoteIcebergSink), - TestSink(BoxSink), + SinkImpl::new(param) } -impl SinkImpl { - pub fn get_connector(&self) -> &'static str { - match self { - SinkImpl::Kafka(_) => "kafka", - SinkImpl::Redis(_) => "redis", - SinkImpl::Remote(_) => "remote", - SinkImpl::BlackHole(_) => "blackhole", - SinkImpl::Kinesis(_) => "kinesis", - SinkImpl::ClickHouse(_) => "clickhouse", - SinkImpl::Iceberg(_) => "iceberg", - SinkImpl::Nats(_) => "nats", - 
SinkImpl::RemoteIceberg(_) => "iceberg", - SinkImpl::TestSink(_) => "test", - } - } -} - -#[macro_export] -macro_rules! dispatch_sink { - ($impl:expr, $sink:ident, $body:tt) => {{ - use $crate::sink::SinkImpl; - - match $impl { - SinkImpl::Redis($sink) => $body, - SinkImpl::Kafka($sink) => $body, - SinkImpl::Remote($sink) => $body, - SinkImpl::BlackHole($sink) => $body, - SinkImpl::Kinesis($sink) => $body, - SinkImpl::ClickHouse($sink) => $body, - SinkImpl::Iceberg($sink) => $body, - SinkImpl::Nats($sink) => $body, - SinkImpl::RemoteIceberg($sink) => $body, - SinkImpl::TestSink($sink) => $body, +macro_rules! def_sink_impl { + () => { + $crate::for_all_sinks! { def_sink_impl } + }; + ({ $({ $variant_name:ident, $sink_type:ty }),* }) => { + #[derive(Debug)] + pub enum SinkImpl { + $( + $variant_name($sink_type), + )* } - }}; -} -impl SinkImpl { - pub fn new(cfg: SinkConfig, param: SinkParam) -> Result { - Ok(match cfg { - SinkConfig::Redis(cfg) => SinkImpl::Redis(RedisSink::new(cfg, param.schema())?), - SinkConfig::Kafka(cfg) => SinkImpl::Kafka(KafkaSink::new(*cfg, param)), - SinkConfig::Kinesis(cfg) => SinkImpl::Kinesis(KinesisSink::new(*cfg, param)), - SinkConfig::Remote(cfg) => SinkImpl::Remote(RemoteSink::new(cfg, param)), - SinkConfig::BlackHole => SinkImpl::BlackHole(BlackHoleSink), - SinkConfig::ClickHouse(cfg) => SinkImpl::ClickHouse(ClickHouseSink::new( - *cfg, - param.schema(), - param.pk_indices, - param.sink_type.is_append_only(), - )?), - SinkConfig::Iceberg(cfg) => SinkImpl::Iceberg(IcebergSink::new(cfg, param)?), - SinkConfig::Nats(cfg) => SinkImpl::Nats(NatsSink::new( - cfg, - param.schema(), - param.sink_type.is_append_only(), - )), - SinkConfig::RemoteIceberg(cfg) => { - SinkImpl::RemoteIceberg(CoordinatedRemoteSink(RemoteSink::new(cfg, param))) + $( + impl From<$sink_type> for SinkImpl { + fn from(sink: $sink_type) -> SinkImpl { + SinkImpl::$variant_name(sink) + } } - #[cfg(any(test, madsim))] - SinkConfig::Test => 
SinkImpl::TestSink(build_test_sink(param)?), - }) - } + )* + }; } +def_sink_impl!(); + pub type Result = std::result::Result; #[derive(Error, Debug)] @@ -454,8 +371,8 @@ pub enum SinkError { Kinesis(anyhow::Error), #[error("Remote sink error: {0}")] Remote(anyhow::Error), - #[error("Json parse error: {0}")] - JsonParse(String), + #[error("Encode error: {0}")] + Encode(String), #[error("Iceberg error: {0}")] Iceberg(anyhow::Error), #[error("config error: {0}")] @@ -464,8 +381,24 @@ pub enum SinkError { Coordinator(anyhow::Error), #[error("ClickHouse error: {0}")] ClickHouse(String), + #[error("Redis error: {0}")] + Redis(String), #[error("Nats error: {0}")] Nats(anyhow::Error), + #[error("Doris http error: {0}")] + Http(anyhow::Error), + #[error("Doris error: {0}")] + Doris(String), + #[error("Pulsar error: {0}")] + Pulsar(anyhow::Error), + #[error("Internal error: {0}")] + Internal(anyhow::Error), +} + +impl From for SinkError { + fn from(value: icelake::Error) -> Self { + SinkError::Iceberg(anyhow_error!("{}", value)) + } } impl From for SinkError { @@ -480,6 +413,12 @@ impl From for SinkError { } } +impl From for SinkError { + fn from(value: RedisError) -> Self { + SinkError::Redis(format!("{}", value)) + } +} + impl From for RwError { fn from(e: SinkError) -> Self { ErrorCode::SinkError(Box::new(e)).into() diff --git a/src/connector/src/sink/nats.rs b/src/connector/src/sink/nats.rs index c2408acbdab9d..2f810eed786a9 100644 --- a/src/connector/src/sink/nats.rs +++ b/src/connector/src/sink/nats.rs @@ -19,16 +19,21 @@ use async_nats::jetstream::context::Context; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; use risingwave_common::error::anyhow_error; -use risingwave_rpc_client::ConnectorClient; use serde_derive::Deserialize; use serde_with::serde_as; use tokio_retry::strategy::{jitter, ExponentialBackoff}; use tokio_retry::Retry; use super::utils::chunk_to_json; -use super::{DummySinkCommitCoordinator, SinkWriter, 
SinkWriterParam}; +use super::{DummySinkCommitCoordinator, SinkWriterParam}; use crate::common::NatsCommon; -use crate::sink::{Result, Sink, SinkError, SINK_TYPE_APPEND_ONLY}; +use crate::sink::catalog::desc::SinkDesc; +use crate::sink::encoder::{JsonEncoder, TimestampHandlingMode}; +use crate::sink::log_store::DeliveryFutureManagerAddFuture; +use crate::sink::writer::{ + AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt, +}; +use crate::sink::{Result, Sink, SinkError, SinkParam, SINK_TYPE_APPEND_ONLY}; pub const NATS_SINK: &str = "nats"; @@ -53,6 +58,7 @@ pub struct NatsSinkWriter { pub config: NatsConfig, context: Context, schema: Schema, + json_encoder: JsonEncoder, } /// Basic data types for use with the nats interface @@ -70,22 +76,31 @@ impl NatsConfig { } } -impl NatsSink { - pub fn new(config: NatsConfig, schema: Schema, is_append_only: bool) -> Self { - Self { +impl TryFrom for NatsSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + let schema = param.schema(); + let config = NatsConfig::from_hashmap(param.properties)?; + Ok(Self { config, schema, - is_append_only, - } + is_append_only: param.sink_type.is_append_only(), + }) } } -#[async_trait::async_trait] impl Sink for NatsSink { type Coordinator = DummySinkCommitCoordinator; - type Writer = NatsSinkWriter; + type LogSinker = AsyncTruncateLogSinkerOf; + + const SINK_NAME: &'static str = NATS_SINK; - async fn validate(&self, _client: Option) -> Result<()> { + fn default_sink_decouple(desc: &SinkDesc) -> bool { + desc.sink_type.is_append_only() + } + + async fn validate(&self) -> Result<()> { if !self.is_append_only { return Err(SinkError::Nats(anyhow!( "Nats sink only support append-only mode" @@ -103,8 +118,12 @@ impl Sink for NatsSink { Ok(()) } - async fn new_writer(&self, _writer_env: SinkWriterParam) -> Result { - Ok(NatsSinkWriter::new(self.config.clone(), self.schema.clone()).await?) 
+ async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result { + Ok( + NatsSinkWriter::new(self.config.clone(), self.schema.clone()) + .await? + .into_log_sinker(usize::MAX), + ) } } @@ -119,6 +138,7 @@ impl NatsSinkWriter { config: config.clone(), context, schema: schema.clone(), + json_encoder: JsonEncoder::new(schema, None, TimestampHandlingMode::Milli), }) } @@ -126,7 +146,7 @@ impl NatsSinkWriter { Retry::spawn( ExponentialBackoff::from_millis(100).map(jitter).take(3), || async { - let data = chunk_to_json(chunk.clone(), &self.schema).unwrap(); + let data = chunk_to_json(chunk.clone(), &self.json_encoder).unwrap(); for item in data { self.context .publish(self.config.common.subject.clone(), item.into()) @@ -141,17 +161,12 @@ impl NatsSinkWriter { } } -#[async_trait::async_trait] -impl SinkWriter for NatsSinkWriter { - async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { +impl AsyncTruncateSinkWriter for NatsSinkWriter { + async fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + _add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> Result<()> { self.append_only(chunk).await } - - async fn begin_epoch(&mut self, _epoch_id: u64) -> Result<()> { - Ok(()) - } - - async fn barrier(&mut self, _is_checkpoint: bool) -> Result<()> { - Ok(()) - } } diff --git a/src/connector/src/sink/pulsar.rs b/src/connector/src/sink/pulsar.rs new file mode 100644 index 0000000000000..9eb57c1ae0771 --- /dev/null +++ b/src/connector/src/sink/pulsar.rs @@ -0,0 +1,346 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::fmt::Debug; +use std::time::Duration; + +use anyhow::anyhow; +use futures::{FutureExt, TryFuture, TryFutureExt}; +use pulsar::producer::{Message, SendFuture}; +use pulsar::{Producer, ProducerOptions, Pulsar, TokioExecutor}; +use risingwave_common::array::StreamChunk; +use risingwave_common::catalog::Schema; +use serde::Deserialize; +use serde_with::{serde_as, DisplayFromStr}; + +use super::catalog::{SinkFormat, SinkFormatDesc}; +use super::{Sink, SinkError, SinkParam, SinkWriterParam}; +use crate::common::PulsarCommon; +use crate::sink::catalog::desc::SinkDesc; +use crate::sink::encoder::SerTo; +use crate::sink::formatter::{SinkFormatter, SinkFormatterImpl}; +use crate::sink::log_store::DeliveryFutureManagerAddFuture; +use crate::sink::writer::{ + AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt, FormattedSink, +}; +use crate::sink::{DummySinkCommitCoordinator, Result}; +use crate::{deserialize_duration_from_string, dispatch_sink_formatter_impl}; + +pub const PULSAR_SINK: &str = "pulsar"; + +/// The delivery buffer queue size +/// When the `SendFuture` the current `send_future_buffer` +/// is buffering is greater than this size, then enforcing commit once +const PULSAR_SEND_FUTURE_BUFFER_MAX_SIZE: usize = 65536; + +const fn _default_max_retries() -> u32 { + 3 +} + +const fn _default_retry_backoff() -> Duration { + Duration::from_millis(100) +} + +const fn _default_batch_size() -> u32 { + 10000 +} + +const fn _default_batch_byte_size() -> usize { + 1 << 20 +} + +fn 
pulsar_to_sink_err(e: pulsar::Error) -> SinkError { + SinkError::Pulsar(anyhow!(e)) +} + +async fn build_pulsar_producer( + pulsar: &Pulsar, + config: &PulsarConfig, +) -> Result> { + pulsar + .producer() + .with_options(ProducerOptions { + batch_size: Some(config.producer_properties.batch_size), + batch_byte_size: Some(config.producer_properties.batch_byte_size), + ..Default::default() + }) + .with_topic(&config.common.topic) + .build() + .map_err(pulsar_to_sink_err) + .await +} + +#[serde_as] +#[derive(Debug, Clone, Deserialize)] +pub struct PulsarPropertiesProducer { + #[serde(rename = "properties.batch.size", default = "_default_batch_size")] + #[serde_as(as = "DisplayFromStr")] + batch_size: u32, + + #[serde( + rename = "properties.batch.byte.size", + default = "_default_batch_byte_size" + )] + #[serde_as(as = "DisplayFromStr")] + batch_byte_size: usize, +} + +#[serde_as] +#[derive(Debug, Clone, Deserialize)] +pub struct PulsarConfig { + #[serde(rename = "properties.retry.max", default = "_default_max_retries")] + #[serde_as(as = "DisplayFromStr")] + pub max_retry_num: u32, + + #[serde( + rename = "properties.retry.interval", + default = "_default_retry_backoff", + deserialize_with = "deserialize_duration_from_string" + )] + pub retry_interval: Duration, + + #[serde(flatten)] + pub common: PulsarCommon, + + #[serde(flatten)] + pub producer_properties: PulsarPropertiesProducer, +} + +impl PulsarConfig { + pub fn from_hashmap(values: HashMap) -> Result { + let config = serde_json::from_value::(serde_json::to_value(values).unwrap()) + .map_err(|e| SinkError::Config(anyhow!(e)))?; + + Ok(config) + } +} + +#[derive(Debug)] +pub struct PulsarSink { + pub config: PulsarConfig, + schema: Schema, + downstream_pk: Vec, + format_desc: SinkFormatDesc, + db_name: String, + sink_from_name: String, +} + +impl TryFrom for PulsarSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + let schema = param.schema(); + let config = 
PulsarConfig::from_hashmap(param.properties)?; + Ok(Self { + config, + schema, + downstream_pk: param.downstream_pk, + format_desc: param + .format_desc + .ok_or_else(|| SinkError::Config(anyhow!("missing FORMAT ... ENCODE ...")))?, + db_name: param.db_name, + sink_from_name: param.sink_from_name, + }) + } +} + +impl Sink for PulsarSink { + type Coordinator = DummySinkCommitCoordinator; + type LogSinker = AsyncTruncateLogSinkerOf; + + const SINK_NAME: &'static str = PULSAR_SINK; + + fn default_sink_decouple(desc: &SinkDesc) -> bool { + desc.sink_type.is_append_only() + } + + async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result { + Ok(PulsarSinkWriter::new( + self.config.clone(), + self.schema.clone(), + self.downstream_pk.clone(), + &self.format_desc, + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await? + .into_log_sinker(PULSAR_SEND_FUTURE_BUFFER_MAX_SIZE)) + } + + async fn validate(&self) -> Result<()> { + // For upsert Pulsar sink, the primary key must be defined. + if self.format_desc.format != SinkFormat::AppendOnly && self.downstream_pk.is_empty() { + return Err(SinkError::Config(anyhow!( + "primary key not defined for {:?} pulsar sink (please define in `primary_key` field)", + self.format_desc.format + ))); + } + // Check for formatter constructor error, before it is too late for error reporting. + SinkFormatterImpl::new( + &self.format_desc, + self.schema.clone(), + self.downstream_pk.clone(), + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await?; + + // Validate pulsar connection. 
+ let pulsar = self.config.common.build_client().await?; + build_pulsar_producer(&pulsar, &self.config).await?; + + Ok(()) + } +} + +pub struct PulsarSinkWriter { + formatter: SinkFormatterImpl, + pulsar: Pulsar, + producer: Producer, + config: PulsarConfig, +} + +struct PulsarPayloadWriter<'w> { + producer: &'w mut Producer, + config: &'w PulsarConfig, + add_future: DeliveryFutureManagerAddFuture<'w, PulsarDeliveryFuture>, +} + +pub type PulsarDeliveryFuture = impl TryFuture + Unpin + 'static; + +fn may_delivery_future(future: SendFuture) -> PulsarDeliveryFuture { + future.map(|result| { + result + .map(|_| ()) + .map_err(|e: pulsar::Error| SinkError::Pulsar(anyhow!(e))) + }) +} + +impl PulsarSinkWriter { + pub async fn new( + config: PulsarConfig, + schema: Schema, + downstream_pk: Vec, + format_desc: &SinkFormatDesc, + db_name: String, + sink_from_name: String, + ) -> Result { + let formatter = + SinkFormatterImpl::new(format_desc, schema, downstream_pk, db_name, sink_from_name) + .await?; + let pulsar = config.common.build_client().await?; + let producer = build_pulsar_producer(&pulsar, &config).await?; + Ok(Self { + formatter, + pulsar, + producer, + config, + }) + } +} + +impl<'w> PulsarPayloadWriter<'w> { + async fn send_message(&mut self, message: Message) -> Result<()> { + let mut success_flag = false; + let mut connection_err = None; + + for _ in 0..self.config.max_retry_num { + match self.producer.send(message.clone()).await { + // If the message is sent successfully, + // a SendFuture holding the message receipt + // or error after sending is returned + Ok(send_future) => { + self.add_future + .add_future_may_await(may_delivery_future(send_future)) + .await?; + success_flag = true; + break; + } + // error upon sending + Err(e) => match e { + pulsar::Error::Connection(_) + | pulsar::Error::Producer(_) + | pulsar::Error::Consumer(_) => { + connection_err = Some(e); + tokio::time::sleep(self.config.retry_interval).await; + continue; + } + _ => return 
Err(SinkError::Pulsar(anyhow!(e))), + }, + } + } + + if !success_flag { + Err(SinkError::Pulsar(anyhow!(connection_err.unwrap()))) + } else { + Ok(()) + } + } + + async fn write_inner( + &mut self, + event_key_object: Option, + event_object: Option>, + ) -> Result<()> { + let message = Message { + partition_key: event_key_object, + payload: event_object.unwrap_or_default(), + ..Default::default() + }; + + self.send_message(message).await?; + Ok(()) + } +} + +impl<'w> FormattedSink for PulsarPayloadWriter<'w> { + type K = String; + type V = Vec; + + async fn write_one(&mut self, k: Option, v: Option) -> Result<()> { + self.write_inner(k, v).await + } +} + +impl AsyncTruncateSinkWriter for PulsarSinkWriter { + type DeliveryFuture = PulsarDeliveryFuture; + + async fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> Result<()> { + dispatch_sink_formatter_impl!(&self.formatter, formatter, { + let mut payload_writer = PulsarPayloadWriter { + producer: &mut self.producer, + add_future, + config: &self.config, + }; + // TODO: we can call `payload_writer.write_chunk(chunk, formatter)`, + // but for an unknown reason, this will greatly increase the compile time, + // by nearly 4x. May investigate it later. + for r in formatter.format_chunk(&chunk) { + let (key, value) = r?; + payload_writer + .write_inner( + key.map(SerTo::ser_to).transpose()?, + value.map(SerTo::ser_to).transpose()?, + ) + .await?; + } + Ok(()) + }) + } +} diff --git a/src/connector/src/sink/redis.rs b/src/connector/src/sink/redis.rs index e5afa88c38b2f..af3ec3b981620 100644 --- a/src/connector/src/sink/redis.rs +++ b/src/connector/src/sink/redis.rs @@ -12,52 +12,413 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::collections::{HashMap, HashSet}; + +use anyhow::anyhow; use async_trait::async_trait; +use redis::aio::Connection; +use redis::{Client as RedisClient, Pipeline}; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; -use risingwave_rpc_client::ConnectorClient; +use serde_derive::{Deserialize, Serialize}; +use serde_with::serde_as; -use crate::sink::{DummySinkCommitCoordinator, Result, Sink, SinkWriter, SinkWriterParam}; +use super::catalog::SinkFormatDesc; +use super::encoder::template::TemplateEncoder; +use super::formatter::SinkFormatterImpl; +use super::writer::FormattedSink; +use super::{SinkError, SinkParam}; +use crate::dispatch_sink_formatter_impl; +use crate::sink::log_store::DeliveryFutureManagerAddFuture; +use crate::sink::writer::{ + AsyncTruncateLogSinkerOf, AsyncTruncateSinkWriter, AsyncTruncateSinkWriterExt, +}; +use crate::sink::{DummySinkCommitCoordinator, Result, Sink, SinkWriterParam}; -#[derive(Clone, Debug)] -pub struct RedisConfig; +pub const REDIS_SINK: &str = "redis"; +pub const KEY_FORMAT: &str = "key_format"; +pub const VALUE_FORMAT: &str = "value_format"; +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct RedisCommon { + #[serde(rename = "redis.url")] + pub url: String, +} -#[derive(Debug)] -pub struct RedisSink; +impl RedisCommon { + pub(crate) fn build_client(&self) -> anyhow::Result { + let client = RedisClient::open(self.url.clone())?; + Ok(client) + } +} +#[serde_as] +#[derive(Clone, Debug, Deserialize)] +pub struct RedisConfig { + #[serde(flatten)] + pub common: RedisCommon, +} -impl RedisSink { - pub fn new(_cfg: RedisConfig, _schema: Schema) -> Result { - todo!() +impl RedisConfig { + pub fn from_hashmap(properties: HashMap) -> Result { + let config = + serde_json::from_value::(serde_json::to_value(properties).unwrap()) + .map_err(|e| SinkError::Config(anyhow!("{:?}", e)))?; + Ok(config) } } +#[derive(Debug)] +pub struct RedisSink { + config: RedisConfig, + schema: Schema, + 
pk_indices: Vec, + format_desc: SinkFormatDesc, + db_name: String, + sink_from_name: String, +} + #[async_trait] +impl TryFrom for RedisSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + if param.downstream_pk.is_empty() { + return Err(SinkError::Config(anyhow!( + "Redis Sink Primary Key must be specified." + ))); + } + let config = RedisConfig::from_hashmap(param.properties.clone())?; + Ok(Self { + config, + schema: param.schema(), + pk_indices: param.downstream_pk, + format_desc: param + .format_desc + .ok_or_else(|| SinkError::Config(anyhow!("missing FORMAT ... ENCODE ...")))?, + db_name: param.db_name, + sink_from_name: param.sink_from_name, + }) + } +} + impl Sink for RedisSink { type Coordinator = DummySinkCommitCoordinator; - type Writer = RedisSinkWriter; + type LogSinker = AsyncTruncateLogSinkerOf; - async fn new_writer(&self, _writer_env: SinkWriterParam) -> Result { - todo!() + const SINK_NAME: &'static str = "redis"; + + async fn new_log_sinker(&self, _writer_param: SinkWriterParam) -> Result { + Ok(RedisSinkWriter::new( + self.config.clone(), + self.schema.clone(), + self.pk_indices.clone(), + &self.format_desc, + self.db_name.clone(), + self.sink_from_name.clone(), + ) + .await? 
+ .into_log_sinker(usize::MAX)) } - async fn validate(&self, _client: Option) -> Result<()> { - todo!() + async fn validate(&self) -> Result<()> { + let client = self.config.common.build_client()?; + client.get_connection()?; + let all_set: HashSet = self + .schema + .fields() + .iter() + .map(|f| f.name.clone()) + .collect(); + let pk_set: HashSet = self + .schema + .fields() + .iter() + .enumerate() + .filter(|(k, _)| self.pk_indices.contains(k)) + .map(|(_, v)| v.name.clone()) + .collect(); + if matches!( + self.format_desc.encode, + super::catalog::SinkEncode::Template + ) { + let key_format = self.format_desc.options.get(KEY_FORMAT).ok_or_else(|| { + SinkError::Config(anyhow!( + "Cannot find 'key_format',please set it or use JSON" + )) + })?; + let value_format = self.format_desc.options.get(VALUE_FORMAT).ok_or_else(|| { + SinkError::Config(anyhow!( + "Cannot find 'value_format',please set it or use JSON" + )) + })?; + TemplateEncoder::check_string_format(key_format, &pk_set)?; + TemplateEncoder::check_string_format(value_format, &all_set)?; + } + Ok(()) } } -pub struct RedisSinkWriter; +pub struct RedisSinkWriter { + epoch: u64, + schema: Schema, + pk_indices: Vec, + formatter: SinkFormatterImpl, + payload_writer: RedisSinkPayloadWriter, +} -#[async_trait] -impl SinkWriter for RedisSinkWriter { - async fn write_batch(&mut self, _chunk: StreamChunk) -> Result<()> { - todo!(); +struct RedisSinkPayloadWriter { + // connection to redis, one per executor + conn: Option, + // the command pipeline for write-commit + pipe: Pipeline, +} +impl RedisSinkPayloadWriter { + pub async fn new(config: RedisConfig) -> Result { + let client = config.common.build_client()?; + let conn = Some(client.get_async_connection().await?); + let pipe = redis::pipe(); + + Ok(Self { conn, pipe }) + } + + #[cfg(test)] + pub fn mock() -> Self { + let conn = None; + let pipe = redis::pipe(); + Self { conn, pipe } } - async fn begin_epoch(&mut self, _epoch: u64) -> Result<()> { - todo!() + pub 
async fn commit(&mut self) -> Result<()> { + self.pipe.query_async(self.conn.as_mut().unwrap()).await?; + self.pipe.clear(); + Ok(()) } +} + +impl FormattedSink for RedisSinkPayloadWriter { + type K = String; + type V = Vec; + + async fn write_one(&mut self, k: Option, v: Option) -> Result<()> { + let k = k.unwrap(); + match v { + Some(v) => self.pipe.set(k, v), + None => self.pipe.del(k), + }; + Ok(()) + } +} + +impl RedisSinkWriter { + pub async fn new( + config: RedisConfig, + schema: Schema, + pk_indices: Vec, + format_desc: &SinkFormatDesc, + db_name: String, + sink_from_name: String, + ) -> Result { + let payload_writer = RedisSinkPayloadWriter::new(config.clone()).await?; + let formatter = SinkFormatterImpl::new( + format_desc, + schema.clone(), + pk_indices.clone(), + db_name, + sink_from_name, + ) + .await?; + + Ok(Self { + schema, + pk_indices, + epoch: 0, + formatter, + payload_writer, + }) + } + + #[cfg(test)] + pub async fn mock( + schema: Schema, + pk_indices: Vec, + format_desc: &SinkFormatDesc, + ) -> Result { + let formatter = SinkFormatterImpl::new( + format_desc, + schema.clone(), + pk_indices.clone(), + "d1".to_string(), + "t1".to_string(), + ) + .await?; + Ok(Self { + schema, + pk_indices, + epoch: 0, + formatter, + payload_writer: RedisSinkPayloadWriter::mock(), + }) + } +} + +impl AsyncTruncateSinkWriter for RedisSinkWriter { + async fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + _add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> Result<()> { + dispatch_sink_formatter_impl!(&self.formatter, formatter, { + self.payload_writer.write_chunk(chunk, formatter).await + }) + } +} + +#[cfg(test)] +mod test { + use std::collections::BTreeMap; + + use rdkafka::message::FromBytes; + use risingwave_common::array::{Array, I32Array, Op, StreamChunk, Utf8Array}; + use risingwave_common::catalog::{Field, Schema}; + use risingwave_common::types::DataType; + use risingwave_common::util::iter_util::ZipEqDebug; + + use 
super::*; + use crate::sink::catalog::{SinkEncode, SinkFormat}; + use crate::sink::log_store::DeliveryFutureManager; + + #[tokio::test] + async fn test_write() { + let schema = Schema::new(vec![ + Field { + data_type: DataType::Int32, + name: "id".to_string(), + sub_fields: vec![], + type_name: "string".to_string(), + }, + Field { + data_type: DataType::Varchar, + name: "name".to_string(), + sub_fields: vec![], + type_name: "string".to_string(), + }, + ]); + + let format_desc = SinkFormatDesc { + format: SinkFormat::AppendOnly, + encode: SinkEncode::Json, + options: BTreeMap::default(), + }; + + let mut redis_sink_writer = RedisSinkWriter::mock(schema, vec![0], &format_desc) + .await + .unwrap(); + + let chunk_a = StreamChunk::new( + vec![Op::Insert, Op::Insert, Op::Insert], + vec![ + I32Array::from_iter(vec![1, 2, 3]).into_ref(), + Utf8Array::from_iter(vec!["Alice", "Bob", "Clare"]).into_ref(), + ], + ); + + let mut manager = DeliveryFutureManager::new(0); + + redis_sink_writer + .write_chunk(chunk_a, manager.start_write_chunk(0, 0)) + .await + .expect("failed to write batch"); + let expected_a = + vec![ + (0, "*3\r\n$3\r\nSET\r\n$8\r\n{\"id\":1}\r\n$23\r\n{\"id\":1,\"name\":\"Alice\"}\r\n"), + (1, "*3\r\n$3\r\nSET\r\n$8\r\n{\"id\":2}\r\n$21\r\n{\"id\":2,\"name\":\"Bob\"}\r\n"), + (2, "*3\r\n$3\r\nSET\r\n$8\r\n{\"id\":3}\r\n$23\r\n{\"id\":3,\"name\":\"Clare\"}\r\n"), + ]; + + redis_sink_writer + .payload_writer + .pipe + .cmd_iter() + .enumerate() + .zip_eq_debug(expected_a.clone()) + .for_each(|((i, cmd), (exp_i, exp_cmd))| { + if exp_i == i { + assert_eq!(exp_cmd, str::from_bytes(&cmd.get_packed_command()).unwrap()) + } + }); + } + + #[tokio::test] + async fn test_format_write() { + let schema = Schema::new(vec![ + Field { + data_type: DataType::Int32, + name: "id".to_string(), + sub_fields: vec![], + type_name: "string".to_string(), + }, + Field { + data_type: DataType::Varchar, + name: "name".to_string(), + sub_fields: vec![], + type_name: 
"string".to_string(), + }, + ]); + + let mut btree_map = BTreeMap::default(); + btree_map.insert(KEY_FORMAT.to_string(), "key-{id}".to_string()); + btree_map.insert( + VALUE_FORMAT.to_string(), + "values:{id:{id},name:{name}}".to_string(), + ); + let format_desc = SinkFormatDesc { + format: SinkFormat::AppendOnly, + encode: SinkEncode::Template, + options: btree_map, + }; + + let mut redis_sink_writer = RedisSinkWriter::mock(schema, vec![0], &format_desc) + .await + .unwrap(); + + let mut future_manager = DeliveryFutureManager::new(0); + + let chunk_a = StreamChunk::new( + vec![Op::Insert, Op::Insert, Op::Insert], + vec![ + I32Array::from_iter(vec![1, 2, 3]).into_ref(), + Utf8Array::from_iter(vec!["Alice", "Bob", "Clare"]).into_ref(), + ], + ); + + redis_sink_writer + .write_chunk(chunk_a, future_manager.start_write_chunk(0, 0)) + .await + .expect("failed to write batch"); + let expected_a = vec![ + ( + 0, + "*3\r\n$3\r\nSET\r\n$5\r\nkey-1\r\n$24\r\nvalues:{id:1,name:Alice}\r\n", + ), + ( + 1, + "*3\r\n$3\r\nSET\r\n$5\r\nkey-2\r\n$22\r\nvalues:{id:2,name:Bob}\r\n", + ), + ( + 2, + "*3\r\n$3\r\nSET\r\n$5\r\nkey-3\r\n$24\r\nvalues:{id:3,name:Clare}\r\n", + ), + ]; - async fn barrier(&mut self, _is_checkpoint: bool) -> Result<()> { - todo!() + redis_sink_writer + .payload_writer + .pipe + .cmd_iter() + .enumerate() + .zip_eq_debug(expected_a.clone()) + .for_each(|((i, cmd), (exp_i, exp_cmd))| { + if exp_i == i { + assert_eq!(exp_cmd, str::from_bytes(&cmd.get_packed_command()).unwrap()) + } + }); } } diff --git a/src/connector/src/sink/remote.rs b/src/connector/src/sink/remote.rs index 403ee4c7b73e5..3c52cb720dbd4 100644 --- a/src/connector/src/sink/remote.rs +++ b/src/connector/src/sink/remote.rs @@ -13,106 +13,111 @@ // limitations under the License. 
use std::collections::HashMap; +use std::fmt::Formatter; +use std::future::Future; use std::marker::PhantomData; +use std::ops::Deref; +use std::time::Instant; use anyhow::anyhow; use async_trait::async_trait; +use futures::stream::Peekable; +use futures::{StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; +use jni::objects::{JByteArray, JValue, JValueOwned}; use prost::Message; use risingwave_common::array::StreamChunk; -use risingwave_common::catalog::Schema; use risingwave_common::error::anyhow_error; use risingwave_common::types::DataType; +use risingwave_common::util::await_future_with_monitor_error_stream; +use risingwave_jni_core::jvm_runtime::JVM; +use risingwave_pb::connector_service::sink_coordinator_stream_request::{ + CommitMetadata, StartCoordinator, +}; use risingwave_pb::connector_service::sink_writer_stream_request::write_batch::json_payload::RowOp; use risingwave_pb::connector_service::sink_writer_stream_request::write_batch::{ JsonPayload, Payload, StreamChunkPayload, }; +use risingwave_pb::connector_service::sink_writer_stream_request::{ + Barrier, BeginEpoch, Request as SinkRequest, StartSink, WriteBatch, +}; use risingwave_pb::connector_service::sink_writer_stream_response::CommitResponse; -use risingwave_pb::connector_service::{SinkMetadata, SinkPayloadFormat}; -#[cfg(test)] -use risingwave_pb::connector_service::{SinkWriterStreamRequest, SinkWriterStreamResponse}; -use risingwave_rpc_client::{ConnectorClient, SinkCoordinatorStreamHandle, SinkWriterStreamHandle}; -#[cfg(test)] -use tokio::sync::mpsc::{Sender, UnboundedReceiver}; -#[cfg(test)] -use tonic::Status; -use tracing::{error, warn}; +use risingwave_pb::connector_service::{ + sink_coordinator_stream_request, sink_coordinator_stream_response, sink_writer_stream_response, + SinkCoordinatorStreamRequest, SinkCoordinatorStreamResponse, SinkMetadata, SinkPayloadFormat, + SinkWriterStreamRequest, SinkWriterStreamResponse, ValidateSinkRequest, ValidateSinkResponse, +}; +use 
tokio::sync::mpsc; +use tokio::sync::mpsc::{Receiver, Sender}; +use tokio_stream::wrappers::ReceiverStream; +use tracing::warn; +use super::encoder::{JsonEncoder, RowEncoder}; use crate::sink::coordinate::CoordinatedSinkWriter; -use crate::sink::iceberg::REMOTE_ICEBERG_SINK; -use crate::sink::utils::{record_to_json, TimestampHandlingMode}; -use crate::sink::SinkError::Remote; +use crate::sink::encoder::TimestampHandlingMode; +use crate::sink::log_store::{LogReader, LogStoreReadItem, TruncateOffset}; +use crate::sink::writer::{LogSinkerOf, SinkWriter, SinkWriterExt}; use crate::sink::{ - DummySinkCommitCoordinator, Result, Sink, SinkCommitCoordinator, SinkError, SinkParam, - SinkWriter, SinkWriterParam, + DummySinkCommitCoordinator, LogSinker, Result, Sink, SinkCommitCoordinator, SinkError, + SinkMetrics, SinkParam, SinkWriterParam, }; use crate::ConnectorParams; -pub const VALID_REMOTE_SINKS: [&str; 5] = [ - "jdbc", - REMOTE_ICEBERG_SINK, - "deltalake", - "elasticsearch-7", - "cassandra", -]; - -pub fn is_valid_remote_sink(connector_type: &str) -> bool { - VALID_REMOTE_SINKS.contains(&connector_type) -} - -#[derive(Clone, Debug)] -pub struct RemoteConfig { - pub connector_type: String, - pub properties: HashMap, +macro_rules! def_remote_sink { + () => { + def_remote_sink! 
{ + { ElasticSearch, ElasticSearchSink, "elasticsearch" }, + { Cassandra, CassandraSink, "cassandra" }, + { Jdbc, JdbcSink, "jdbc" }, + { DeltaLake, DeltaLakeSink, "deltalake" } + } + }; + ($({ $variant_name:ident, $sink_type_name:ident, $sink_name:expr }),*) => { + $( + #[derive(Debug)] + pub struct $variant_name; + impl RemoteSinkTrait for $variant_name { + const SINK_NAME: &'static str = $sink_name; + } + pub type $sink_type_name = RemoteSink<$variant_name>; + )* + }; } -impl RemoteConfig { - pub fn from_hashmap(values: HashMap) -> Result { - let connector_type = values - .get("connector") - .expect("sink type must be specified") - .to_string(); - - if !is_valid_remote_sink(connector_type.as_str()) { - return Err(SinkError::Config(anyhow!( - "invalid connector type: {connector_type}" - ))); - } +def_remote_sink!(); - Ok(RemoteConfig { - connector_type, - properties: values, - }) - } +pub trait RemoteSinkTrait: Send + Sync + 'static { + const SINK_NAME: &'static str; } #[derive(Debug)] -pub struct RemoteSink { - config: RemoteConfig, +pub struct RemoteSink { param: SinkParam, + _phantom: PhantomData, } -impl RemoteSink { - pub fn new(config: RemoteConfig, param: SinkParam) -> Self { - Self { config, param } +impl TryFrom for RemoteSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + Ok(Self { + param, + _phantom: PhantomData, + }) } } -#[async_trait] -impl Sink for RemoteSink { +impl Sink for RemoteSink { type Coordinator = DummySinkCommitCoordinator; - type Writer = RemoteSinkWriter; + type LogSinker = RemoteLogSinker; - async fn new_writer(&self, writer_param: SinkWriterParam) -> Result { - Ok(RemoteSinkWriter::new( - self.config.clone(), - self.param.clone(), - writer_param.connector_params, - ) - .await?) 
+ const SINK_NAME: &'static str = R::SINK_NAME; + + async fn new_log_sinker(&self, writer_param: SinkWriterParam) -> Result { + RemoteLogSinker::new(self.param.clone(), writer_param).await } - async fn validate(&self, client: Option) -> Result<()> { + async fn validate(&self) -> Result<()> { // FIXME: support struct and array in stream sink self.param.columns.iter().map(|col| { if matches!( @@ -144,33 +149,207 @@ impl Sink for RemoteSink { } }).try_collect()?; - let client = client.ok_or_else(|| { - SinkError::Remote(anyhow_error!( - "connector node endpoint not specified or unable to connect to connector node" - )) - })?; + let mut env = JVM + .get_or_init() + .map_err(|err| SinkError::Internal(err.into()))? + .attach_current_thread() + .map_err(|err| SinkError::Internal(err.into()))?; + let validate_sink_request = ValidateSinkRequest { + sink_param: Some(self.param.to_proto()), + }; + let validate_sink_request_bytes = env + .byte_array_from_slice(&Message::encode_to_vec(&validate_sink_request)) + .map_err(|err| SinkError::Internal(err.into()))?; - // We validate a remote sink's accessibility as well as the pk. - client - .validate_sink_properties(self.param.to_proto()) - .await - .map_err(SinkError::from) + let response = env + .call_static_method( + "com/risingwave/connector/JniSinkValidationHandler", + "validate", + "([B)[B", + &[JValue::Object(&validate_sink_request_bytes)], + ) + .map_err(|err| SinkError::Internal(err.into()))?; + + let validate_sink_response_bytes = match response { + JValueOwned::Object(o) => unsafe { JByteArray::from_raw(o.into_raw()) }, + _ => unreachable!(), + }; + + let validate_sink_response: ValidateSinkResponse = Message::decode( + risingwave_jni_core::to_guarded_slice(&validate_sink_response_bytes, &mut env) + .map_err(|err| SinkError::Internal(err.into()))? + .deref(), + ) + .map_err(|err| SinkError::Internal(err.into()))?; + + validate_sink_response.error.map_or_else( + || Ok(()), // If there is no error message, return Ok here. 
+ |err| { + Err(SinkError::Remote(anyhow!(format!( + "sink cannot pass validation: {}", + err.error_message + )))) + }, + ) } } -#[derive(Debug)] -pub struct CoordinatedRemoteSink(pub RemoteSink); +pub struct RemoteLogSinker { + writer: RemoteSinkWriter, + sink_metrics: SinkMetrics, +} + +impl RemoteLogSinker { + async fn new(sink_param: SinkParam, writer_param: SinkWriterParam) -> Result { + let writer = RemoteSinkWriter::new( + sink_param, + writer_param.connector_params, + writer_param.sink_metrics.clone(), + ) + .await?; + let sink_metrics = writer_param.sink_metrics; + Ok(RemoteLogSinker { + writer, + sink_metrics, + }) + } +} + +/// Await the given future while monitoring on error of the receiver stream. +async fn await_future_with_monitor_receiver_err>>( + receiver: &mut SinkWriterStreamJniReceiver, + future: F, +) -> Result { + match await_future_with_monitor_error_stream(&mut receiver.response_stream, future).await { + Ok(result) => result, + Err(None) => Err(SinkError::Remote(anyhow!("end of remote receiver stream"))), + Err(Some(err)) => Err(SinkError::Internal(err)), + } +} #[async_trait] -impl Sink for CoordinatedRemoteSink { - type Coordinator = RemoteCoordinator; - type Writer = CoordinatedSinkWriter; +impl LogSinker for RemoteLogSinker { + async fn consume_log_and_sink(self, mut log_reader: impl LogReader) -> Result<()> { + // Note: this is a total copy of the implementation of LogSinkerOf, + // except that we monitor the future of `log_reader.next_item` with await_future_with_monitor_receiver_err + // to monitor the error in the response stream. + + let mut sink_writer = self.writer; + let sink_metrics = self.sink_metrics; + #[derive(Debug)] + enum LogConsumerState { + /// Mark that the log consumer is not initialized yet + Uninitialized, - async fn validate(&self, client: Option) -> Result<()> { - self.0.validate(client).await + /// Mark that a new epoch has begun. 
+ EpochBegun { curr_epoch: u64 }, + + /// Mark that the consumer has just received a barrier + BarrierReceived { prev_epoch: u64 }, + } + + let mut state = LogConsumerState::Uninitialized; + + log_reader.init().await?; + + loop { + let (epoch, item): (u64, LogStoreReadItem) = await_future_with_monitor_receiver_err( + &mut sink_writer.stream_handle.response_rx, + log_reader.next_item().map_err(SinkError::Internal), + ) + .await?; + if let LogStoreReadItem::UpdateVnodeBitmap(_) = &item { + match &state { + LogConsumerState::BarrierReceived { .. } => {} + _ => unreachable!( + "update vnode bitmap can be accepted only right after \ + barrier, but current state is {:?}", + state + ), + } + } + // begin_epoch when not previously began + state = match state { + LogConsumerState::Uninitialized => { + sink_writer.begin_epoch(epoch).await?; + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + LogConsumerState::EpochBegun { curr_epoch } => { + assert!( + epoch >= curr_epoch, + "new epoch {} should not be below the current epoch {}", + epoch, + curr_epoch + ); + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + LogConsumerState::BarrierReceived { prev_epoch } => { + assert!( + epoch > prev_epoch, + "new epoch {} should be greater than prev epoch {}", + epoch, + prev_epoch + ); + sink_writer.begin_epoch(epoch).await?; + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + }; + match item { + LogStoreReadItem::StreamChunk { chunk, .. 
} => { + if let Err(e) = sink_writer.write_batch(chunk).await { + sink_writer.abort().await?; + return Err(e); + } + } + LogStoreReadItem::Barrier { is_checkpoint } => { + let prev_epoch = match state { + LogConsumerState::EpochBegun { curr_epoch } => curr_epoch, + _ => unreachable!("epoch must have begun before handling barrier"), + }; + if is_checkpoint { + let start_time = Instant::now(); + sink_writer.barrier(true).await?; + sink_metrics + .sink_commit_duration_metrics + .observe(start_time.elapsed().as_millis() as f64); + log_reader + .truncate(TruncateOffset::Barrier { epoch }) + .await?; + } else { + sink_writer.barrier(false).await?; + } + state = LogConsumerState::BarrierReceived { prev_epoch } + } + LogStoreReadItem::UpdateVnodeBitmap(vnode_bitmap) => { + sink_writer.update_vnode_bitmap(vnode_bitmap).await?; + } + } + } + } +} + +#[derive(Debug)] +pub struct CoordinatedRemoteSink(pub RemoteSink); + +impl TryFrom for CoordinatedRemoteSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> std::result::Result { + RemoteSink::try_from(param).map(Self) + } +} + +impl Sink for CoordinatedRemoteSink { + type Coordinator = RemoteCoordinator; + type LogSinker = LogSinkerOf>>; + + const SINK_NAME: &'static str = R::SINK_NAME; + + async fn validate(&self) -> Result<()> { + self.0.validate().await } - async fn new_writer(&self, writer_param: SinkWriterParam) -> Result { + async fn new_log_sinker(&self, writer_param: SinkWriterParam) -> Result { Ok(CoordinatedSinkWriter::new( writer_param .meta_client @@ -184,87 +363,286 @@ impl Sink for CoordinatedRemoteSink { )) })?, CoordinatedRemoteSinkWriter::new( - self.0.config.clone(), self.0.param.clone(), writer_param.connector_params, + writer_param.sink_metrics.clone(), ) .await?, ) - .await?) + .await? 
+ .into_log_sinker(writer_param.sink_metrics)) } - async fn new_coordinator( - &self, - connector_client: Option, - ) -> Result { - Ok(RemoteCoordinator::new( - connector_client - .ok_or_else(|| Remote(anyhow_error!("no connector client specified")))?, - self.0.param.clone(), + async fn new_coordinator(&self) -> Result { + RemoteCoordinator::new(self.0.param.clone()).await + } +} + +#[derive(Debug)] +pub struct SinkCoordinatorStreamJniHandle { + request_tx: Sender, + response_rx: Receiver, +} + +impl SinkCoordinatorStreamJniHandle { + pub async fn commit(&mut self, epoch: u64, metadata: Vec) -> Result<()> { + self.request_tx + .send(SinkCoordinatorStreamRequest { + request: Some(sink_coordinator_stream_request::Request::Commit( + CommitMetadata { epoch, metadata }, + )), + }) + .await + .map_err(|err| SinkError::Internal(err.into()))?; + + match self.response_rx.recv().await { + Some(SinkCoordinatorStreamResponse { + response: + Some(sink_coordinator_stream_response::Response::Commit( + sink_coordinator_stream_response::CommitResponse { + epoch: response_epoch, + }, + )), + }) => { + if epoch == response_epoch { + Ok(()) + } else { + Err(SinkError::Internal(anyhow!( + "get different response epoch to commit epoch: {} {}", + epoch, + response_epoch + ))) + } + } + msg => Err(SinkError::Internal(anyhow!( + "should get Commit response but get {:?}", + msg + ))), + } + } +} + +struct SinkWriterStreamJniSender { + request_tx: Sender, +} + +impl SinkWriterStreamJniSender { + pub async fn start_epoch(&mut self, epoch: u64) -> Result<()> { + self.request_tx + .send(SinkWriterStreamRequest { + request: Some(SinkRequest::BeginEpoch(BeginEpoch { epoch })), + }) + .await + .map_err(|err| SinkError::Internal(err.into())) + } + + pub async fn write_batch(&mut self, epoch: u64, batch_id: u64, payload: Payload) -> Result<()> { + self.request_tx + .send(SinkWriterStreamRequest { + request: Some(SinkRequest::WriteBatch(WriteBatch { + epoch, + batch_id, + payload: Some(payload), + 
})), + }) + .await + .map_err(|err| SinkError::Internal(err.into())) + } + + pub async fn barrier(&mut self, epoch: u64, is_checkpoint: bool) -> Result<()> { + self.request_tx + .send(SinkWriterStreamRequest { + request: Some(SinkRequest::Barrier(Barrier { + epoch, + is_checkpoint, + })), + }) + .await + .map_err(|err| SinkError::Internal(err.into())) + } +} + +struct SinkWriterStreamJniReceiver { + response_stream: Peekable>>, +} + +impl SinkWriterStreamJniReceiver { + async fn next_commit_response(&mut self) -> Result { + match self.response_stream.try_next().await { + Ok(Some(SinkWriterStreamResponse { + response: Some(sink_writer_stream_response::Response::Commit(rsp)), + })) => Ok(rsp), + msg => Err(SinkError::Internal(anyhow!( + "should get Sync response but get {:?}", + msg + ))), + } + } +} + +const DEFAULT_CHANNEL_SIZE: usize = 16; +struct SinkWriterStreamJniHandle { + request_tx: SinkWriterStreamJniSender, + response_rx: SinkWriterStreamJniReceiver, +} + +impl std::fmt::Debug for SinkWriterStreamJniHandle { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SinkWriterStreamJniHandle").finish() + } +} + +impl SinkWriterStreamJniHandle { + async fn start_epoch(&mut self, epoch: u64) -> Result<()> { + await_future_with_monitor_receiver_err( + &mut self.response_rx, + self.request_tx.start_epoch(epoch), + ) + .await + } + + async fn write_batch(&mut self, epoch: u64, batch_id: u64, payload: Payload) -> Result<()> { + await_future_with_monitor_receiver_err( + &mut self.response_rx, + self.request_tx.write_batch(epoch, batch_id, payload), ) - .await?) 
+ .await + } + + async fn barrier(&mut self, epoch: u64) -> Result<()> { + await_future_with_monitor_receiver_err( + &mut self.response_rx, + self.request_tx.barrier(epoch, false), + ) + .await + } + + async fn commit(&mut self, epoch: u64) -> Result { + await_future_with_monitor_receiver_err( + &mut self.response_rx, + self.request_tx.barrier(epoch, true), + ) + .await?; + self.response_rx.next_commit_response().await } } -pub type RemoteSinkWriter = RemoteSinkWriterInner<()>; -pub type CoordinatedRemoteSinkWriter = RemoteSinkWriterInner>; +pub type RemoteSinkWriter = RemoteSinkWriterInner<(), R>; +pub type CoordinatedRemoteSinkWriter = RemoteSinkWriterInner, R>; -#[derive(Debug)] -pub struct RemoteSinkWriterInner { - pub connector_type: String, +pub struct RemoteSinkWriterInner { properties: HashMap, epoch: Option, batch_id: u64, - schema: Schema, payload_format: SinkPayloadFormat, - stream_handle: SinkWriterStreamHandle, - _phantom: PhantomData, + stream_handle: SinkWriterStreamJniHandle, + json_encoder: JsonEncoder, + sink_metrics: SinkMetrics, + _phantom: PhantomData<(SM, R)>, } -impl RemoteSinkWriterInner { +impl RemoteSinkWriterInner { pub async fn new( - config: RemoteConfig, param: SinkParam, connector_params: ConnectorParams, + sink_metrics: SinkMetrics, ) -> Result { - let client = connector_params.connector_client.ok_or_else(|| { - SinkError::Remote(anyhow_error!( - "connector node endpoint not specified or unable to connect to connector node" - )) - })?; - let stream_handle = client - .start_sink_writer_stream(param.to_proto(), connector_params.sink_payload_format) + let (request_tx, request_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + let (response_tx, response_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + + let mut response_stream = ReceiverStream::new(response_rx).peekable(); + + std::thread::spawn(move || { + let mut env = JVM.get_or_init().unwrap().attach_current_thread().unwrap(); + + let result = env.call_static_method( + 
"com/risingwave/connector/JniSinkWriterHandler", + "runJniSinkWriterThread", + "(JJ)V", + &[ + JValue::from(&request_rx as *const Receiver as i64), + JValue::from( + &response_tx as *const Sender> + as i64, + ), + ], + ); + + match result { + Ok(_) => { + tracing::info!("end of jni call runJniSinkWriterThread"); + } + Err(e) => { + tracing::error!("jni call error: {:?}", e); + } + }; + }); + + let sink_writer_stream_request = SinkWriterStreamRequest { + request: Some(SinkRequest::Start(StartSink { + sink_param: Some(param.to_proto()), + format: connector_params.sink_payload_format as i32, + })), + }; + + // First request + request_tx + .send(sink_writer_stream_request) .await - .inspect_err(|e| { - error!( - "failed to start sink stream for connector `{}`: {:?}", - &config.connector_type, e - ) + .map_err(|err| { + SinkError::Internal(anyhow!( + "fail to send start request for connector `{}`: {:?}", + R::SINK_NAME, + err + )) })?; + + // First response + match response_stream.try_next().await { + Ok(Some(SinkWriterStreamResponse { + response: Some(sink_writer_stream_response::Response::Start(_)), + })) => {} + Ok(msg) => { + return Err(SinkError::Internal(anyhow!( + "should get start response for connector `{}` but get {:?}", + R::SINK_NAME, + msg + ))); + } + Err(e) => return Err(SinkError::Internal(e)), + }; + tracing::trace!( "{:?} sink stream started with properties: {:?}", - &config.connector_type, - &config.properties + R::SINK_NAME, + ¶m.properties ); + let schema = param.schema(); + + let stream_handle = SinkWriterStreamJniHandle { + request_tx: SinkWriterStreamJniSender { request_tx }, + response_rx: SinkWriterStreamJniReceiver { response_stream }, + }; + Ok(Self { - connector_type: config.connector_type, - properties: config.properties, + properties: param.properties, epoch: None, batch_id: 0, - schema: param.schema(), stream_handle, payload_format: connector_params.sink_payload_format, + json_encoder: JsonEncoder::new(schema, None, 
TimestampHandlingMode::String), + sink_metrics, _phantom: PhantomData, }) } #[cfg(test)] fn for_test( - response_receiver: UnboundedReceiver>, + response_receiver: Receiver>, request_sender: Sender, - ) -> RemoteSinkWriter { - use risingwave_common::catalog::Field; + ) -> RemoteSinkWriter { + use risingwave_common::catalog::{Field, Schema}; let properties = HashMap::from([("output.path".to_string(), "/tmp/rw".to_string())]); let schema = Schema::new(vec![ @@ -282,22 +660,23 @@ impl RemoteSinkWriterInner { }, ]); - use futures::StreamExt; - use tokio_stream::wrappers::UnboundedReceiverStream; - - let stream_handle = SinkWriterStreamHandle::for_test( - request_sender, - UnboundedReceiverStream::new(response_receiver).boxed(), - ); + let stream_handle = SinkWriterStreamJniHandle { + request_tx: SinkWriterStreamJniSender { + request_tx: request_sender, + }, + response_rx: SinkWriterStreamJniReceiver { + response_stream: ReceiverStream::new(response_receiver).peekable(), + }, + }; RemoteSinkWriter { - connector_type: "file".to_string(), properties, epoch: None, batch_id: 0, - schema, + json_encoder: JsonEncoder::new(schema, None, TimestampHandlingMode::String), stream_handle, payload_format: SinkPayloadFormat::Json, + sink_metrics: SinkMetrics::for_test(), _phantom: PhantomData, } } @@ -309,7 +688,7 @@ trait HandleBarrierResponse { fn non_checkpoint_return_value() -> Self::SinkMetadata; } -impl HandleBarrierResponse for RemoteSinkWriter { +impl HandleBarrierResponse for RemoteSinkWriter { type SinkMetadata = (); fn handle_commit_response(rsp: CommitResponse) -> Result { @@ -322,7 +701,7 @@ impl HandleBarrierResponse for RemoteSinkWriter { fn non_checkpoint_return_value() -> Self::SinkMetadata {} } -impl HandleBarrierResponse for CoordinatedRemoteSinkWriter { +impl HandleBarrierResponse for CoordinatedRemoteSinkWriter { type SinkMetadata = Option; fn handle_commit_response(rsp: CommitResponse) -> Result { @@ -341,22 +720,22 @@ impl HandleBarrierResponse for 
CoordinatedRemoteSinkWriter { } #[async_trait] -impl SinkWriter for RemoteSinkWriterInner +impl SinkWriter for RemoteSinkWriterInner where Self: HandleBarrierResponse, { type CommitMetadata = SM; async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()> { + let cardinality = chunk.cardinality(); + self.sink_metrics + .connector_sink_rows_received + .inc_by(cardinality as _); let payload = match self.payload_format { SinkPayloadFormat::Json => { - let mut row_ops = vec![]; + let mut row_ops = Vec::with_capacity(cardinality); for (op, row_ref) in chunk.rows() { - let map = record_to_json( - row_ref, - &self.schema.fields, - TimestampHandlingMode::String, - )?; + let map = self.json_encoder.encode(row_ref)?; let row_op = RowOp { op_type: op.to_protobuf() as i32, line: serde_json::to_string(&map) @@ -415,21 +794,98 @@ where } } -pub struct RemoteCoordinator { - stream_handle: SinkCoordinatorStreamHandle, +pub struct RemoteCoordinator { + stream_handle: SinkCoordinatorStreamJniHandle, + _phantom: PhantomData, } -impl RemoteCoordinator { - pub async fn new(client: ConnectorClient, param: SinkParam) -> Result { - let stream_handle = client - .start_sink_coordinator_stream(param.to_proto()) - .await?; - Ok(RemoteCoordinator { stream_handle }) +impl RemoteCoordinator { + pub async fn new(param: SinkParam) -> Result { + let (request_tx, request_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + let (response_tx, response_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + + let mut stream_handle = SinkCoordinatorStreamJniHandle { + request_tx, + response_rx, + }; + + std::thread::spawn(move || { + let mut env = JVM.get_or_init().unwrap().attach_current_thread().unwrap(); + + let result = env.call_static_method( + "com/risingwave/connector/JniSinkCoordinatorHandler", + "runJniSinkCoordinatorThread", + "(JJ)V", + &[ + JValue::from( + &request_rx as *const Receiver as i64, + ), + JValue::from( + &response_tx as *const Sender as i64, + ), + ], + ); + + match result { + Ok(_) => { + 
tracing::info!("end of jni call runJniSinkCoordinatorThread"); + } + Err(e) => { + tracing::error!("jni call error: {:?}", e); + } + }; + }); + + let sink_coordinator_stream_request = SinkCoordinatorStreamRequest { + request: Some(sink_coordinator_stream_request::Request::Start( + StartCoordinator { + param: Some(param.to_proto()), + }, + )), + }; + + // First request + stream_handle + .request_tx + .send(sink_coordinator_stream_request) + .await + .map_err(|err| { + SinkError::Internal(anyhow!( + "fail to send start request for connector `{}`: {:?}", + R::SINK_NAME, + err + )) + })?; + + // First response + match stream_handle.response_rx.recv().await { + Some(SinkCoordinatorStreamResponse { + response: Some(sink_coordinator_stream_response::Response::Start(_)), + }) => {} + msg => { + return Err(SinkError::Internal(anyhow!( + "should get start response for connector `{}` but get {:?}", + R::SINK_NAME, + msg + ))); + } + }; + + tracing::trace!( + "{:?} RemoteCoordinator started with properties: {:?}", + R::SINK_NAME, + ¶m.properties + ); + + Ok(RemoteCoordinator { + stream_handle, + _phantom: PhantomData, + }) } } #[async_trait] -impl SinkCommitCoordinator for RemoteCoordinator { +impl SinkCommitCoordinator for RemoteCoordinator { async fn init(&mut self) -> Result<()> { Ok(()) } @@ -452,15 +908,20 @@ mod test { use risingwave_pb::data; use tokio::sync::mpsc; - use crate::sink::remote::RemoteSinkWriter; + use crate::sink::remote::{RemoteSinkTrait, RemoteSinkWriter}; use crate::sink::SinkWriter; + struct TestRemote; + impl RemoteSinkTrait for TestRemote { + const SINK_NAME: &'static str = "test-remote"; + } + #[tokio::test] async fn test_epoch_check() { let (request_sender, mut request_recv) = mpsc::channel(16); - let (_, resp_recv) = mpsc::unbounded_channel(); + let (_, resp_recv) = mpsc::channel(16); - let mut sink = RemoteSinkWriter::for_test(resp_recv, request_sender); + let mut sink = >::for_test(resp_recv, request_sender); let chunk = 
StreamChunk::from_pretty( " i T + 1 Ripper @@ -496,8 +957,8 @@ mod test { #[tokio::test] async fn test_remote_sink() { let (request_sender, mut request_receiver) = mpsc::channel(16); - let (response_sender, response_receiver) = mpsc::unbounded_channel(); - let mut sink = RemoteSinkWriter::for_test(response_receiver, request_sender); + let (response_sender, response_receiver) = mpsc::channel(16); + let mut sink = >::for_test(response_receiver, request_sender); let chunk_a = StreamChunk::from_pretty( " i T @@ -558,6 +1019,7 @@ mod test { metadata: None, })), })) + .await .expect("test failed: failed to sync epoch"); sink.barrier(true).await.unwrap(); let commit_request = request_receiver.recv().await.unwrap(); diff --git a/src/connector/src/sink/test_sink.rs b/src/connector/src/sink/test_sink.rs index d288253eb7a23..6f327ceaf9cbc 100644 --- a/src/connector/src/sink/test_sink.rs +++ b/src/connector/src/sink/test_sink.rs @@ -14,23 +14,62 @@ use std::sync::{Arc, OnceLock}; +use anyhow::anyhow; use parking_lot::Mutex; -use crate::sink::boxed::BoxSink; -use crate::sink::{SinkError, SinkParam}; +use crate::sink::boxed::{BoxCoordinator, BoxWriter}; +use crate::sink::writer::{LogSinkerOf, SinkWriterExt}; +use crate::sink::{Sink, SinkError, SinkParam, SinkWriterParam}; -pub type BuildBoxSink = - Box Result + Send + Sync + 'static>; +pub trait BuildBoxWriterTrait = FnMut(SinkParam, SinkWriterParam) -> BoxWriter<()> + Send + 'static; + +pub type BuildBoxWriter = Box; pub const TEST_SINK_NAME: &str = "test"; +#[derive(Debug)] +pub struct TestSink { + param: SinkParam, +} + +impl TryFrom for TestSink { + type Error = SinkError; + + fn try_from(param: SinkParam) -> Result { + if cfg!(any(madsim, test)) { + Ok(TestSink { param }) + } else { + Err(SinkError::Config(anyhow!("test sink only support in test"))) + } + } +} + +impl Sink for TestSink { + type Coordinator = BoxCoordinator; + type LogSinker = LogSinkerOf>; + + const SINK_NAME: &'static str = "test"; + + async fn 
validate(&self) -> crate::sink::Result<()> { + Ok(()) + } + + async fn new_log_sinker( + &self, + writer_param: SinkWriterParam, + ) -> crate::sink::Result { + let metrics = writer_param.sink_metrics.clone(); + Ok(build_box_writer(self.param.clone(), writer_param).into_log_sinker(metrics)) + } +} + struct TestSinkRegistry { - build_box_sink: Arc>>, + build_box_writer: Arc>>, } impl TestSinkRegistry { fn new() -> Self { TestSinkRegistry { - build_box_sink: Arc::new(Mutex::new(None)), + build_box_writer: Arc::new(Mutex::new(None)), } } } @@ -44,25 +83,23 @@ pub struct TestSinkRegistryGuard; impl Drop for TestSinkRegistryGuard { fn drop(&mut self) { - assert!(get_registry().build_box_sink.lock().take().is_some()); + assert!(get_registry().build_box_writer.lock().take().is_some()); } } -pub fn registry_build_sink( - build_sink: impl Fn(SinkParam) -> Result + Send + Sync + 'static, -) -> TestSinkRegistryGuard { +pub fn registry_build_sink(build_box_writer: impl BuildBoxWriterTrait) -> TestSinkRegistryGuard { assert!(get_registry() - .build_box_sink + .build_box_writer .lock() - .replace(Box::new(build_sink)) + .replace(Box::new(build_box_writer)) .is_none()); TestSinkRegistryGuard } -pub fn build_test_sink(param: SinkParam) -> Result { +pub fn build_box_writer(param: SinkParam, writer_param: SinkWriterParam) -> BoxWriter<()> { (get_registry() - .build_box_sink + .build_box_writer .lock() - .as_ref() - .expect("should not be empty"))(param) + .as_mut() + .expect("should not be empty"))(param, writer_param) } diff --git a/src/connector/src/sink/utils.rs b/src/connector/src/sink/utils.rs index 1eb2f91e9af22..967c3fc43ba30 100644 --- a/src/connector/src/sink/utils.rs +++ b/src/connector/src/sink/utils.rs @@ -12,568 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use base64::engine::general_purpose; -use base64::Engine as _; -use chrono::{Datelike, Timelike}; -use futures_async_stream::try_stream; -use risingwave_common::array::stream_chunk::Op; -use risingwave_common::array::{ArrayError, ArrayResult, RowRef, StreamChunk}; -use risingwave_common::catalog::{Field, Schema}; -use risingwave_common::row::Row; -use risingwave_common::types::{DataType, DatumRef, ScalarRefImpl, ToText}; -use risingwave_common::util::iter_util::{ZipEqDebug, ZipEqFast}; -use serde_json::{json, Map, Value}; -use tracing::warn; +use risingwave_common::array::StreamChunk; +use serde_json::Value; -use crate::sink::{Result, SinkError}; +use super::encoder::{JsonEncoder, RowEncoder}; +use crate::sink::Result; -const DEBEZIUM_NAME_FIELD_PREFIX: &str = "RisingWave"; - -pub struct DebeziumAdapterOpts { - gen_tombstone: bool, -} - -impl Default for DebeziumAdapterOpts { - fn default() -> Self { - Self { - gen_tombstone: true, - } - } -} - -fn concat_debezium_name_field(db_name: &str, sink_from_name: &str, value: &str) -> String { - DEBEZIUM_NAME_FIELD_PREFIX.to_owned() + "." + db_name + "." + sink_from_name + "." 
+ value -} - -#[try_stream(ok = (Option, Option), error = SinkError)] -pub async fn gen_debezium_message_stream<'a>( - schema: &'a Schema, - pk_indices: &'a [usize], - chunk: StreamChunk, - ts_ms: u64, - opts: DebeziumAdapterOpts, - db_name: &'a str, - sink_from_name: &'a str, -) { - let source_field = json!({ - "db": db_name, - "table": sink_from_name, - }); - - let mut update_cache: Option> = None; - - for (op, row) in chunk.rows() { - let event_key_object: Option = Some(json!({ - "schema": json!({ - "type": "struct", - "fields": fields_pk_to_json(&schema.fields, pk_indices), - "optional": false, - "name": concat_debezium_name_field(db_name, sink_from_name, "Key"), - }), - "payload": pk_to_json(row, &schema.fields, pk_indices)?, - })); - let event_object: Option = match op { - Op::Insert => Some(json!({ - "schema": schema_to_json(schema, db_name, sink_from_name), - "payload": { - "before": null, - "after": record_to_json(row, &schema.fields, TimestampHandlingMode::Milli)?, - "op": "c", - "ts_ms": ts_ms, - "source": source_field, - } - })), - Op::Delete => { - let value_obj = Some(json!({ - "schema": schema_to_json(schema, db_name, sink_from_name), - "payload": { - "before": record_to_json(row, &schema.fields, TimestampHandlingMode::Milli)?, - "after": null, - "op": "d", - "ts_ms": ts_ms, - "source": source_field, - } - })); - yield (event_key_object.clone(), value_obj); - - if opts.gen_tombstone { - // Tomestone event - // https://debezium.io/documentation/reference/2.1/connectors/postgresql.html#postgresql-delete-events - yield (event_key_object, None); - } - - continue; - } - Op::UpdateDelete => { - update_cache = Some(record_to_json( - row, - &schema.fields, - TimestampHandlingMode::Milli, - )?); - continue; - } - Op::UpdateInsert => { - if let Some(before) = update_cache.take() { - Some(json!({ - "schema": schema_to_json(schema, db_name, sink_from_name), - "payload": { - "before": before, - "after": record_to_json(row, &schema.fields, 
TimestampHandlingMode::Milli)?, - "op": "u", - "ts_ms": ts_ms, - "source": source_field, - } - })) - } else { - warn!( - "not found UpdateDelete in prev row, skipping, row index {:?}", - row.index() - ); - continue; - } - } - }; - yield (event_key_object, event_object); - } -} - -pub(crate) fn schema_to_json(schema: &Schema, db_name: &str, sink_from_name: &str) -> Value { - let mut schema_fields = Vec::new(); - schema_fields.push(json!({ - "type": "struct", - "fields": fields_to_json(&schema.fields), - "optional": true, - "field": "before", - "name": concat_debezium_name_field(db_name, sink_from_name, "Key"), - })); - schema_fields.push(json!({ - "type": "struct", - "fields": fields_to_json(&schema.fields), - "optional": true, - "field": "after", - "name": concat_debezium_name_field(db_name, sink_from_name, "Key"), - })); - - schema_fields.push(json!({ - "type": "struct", - "optional": false, - "name": concat_debezium_name_field(db_name, sink_from_name, "Source"), - "fields": vec![ - json!({ - "type": "string", - "optional": false, - "field": "db" - }), - json!({ - "type": "string", - "optional": true, - "field": "table" - })], - "field": "source" - })); - schema_fields.push(json!({ - "type": "string", - "optional": false, - "field": "op" - })); - schema_fields.push(json!({ - "type": "int64", - "optional": false, - "field": "ts_ms" - })); - - json!({ - "type": "struct", - "fields": schema_fields, - "optional": false, - "name": concat_debezium_name_field(db_name, sink_from_name, "Envelope"), - }) -} - -pub(crate) fn fields_pk_to_json(fields: &[Field], pk_indices: &[usize]) -> Value { - let mut res = Vec::new(); - for idx in pk_indices { - res.push(field_to_json(&fields[*idx])); - } - json!(res) -} - -pub(crate) fn fields_to_json(fields: &[Field]) -> Value { - let mut res = Vec::new(); - - fields - .iter() - .for_each(|field| res.push(field_to_json(field))); - - json!(res) -} - -pub(crate) fn field_to_json(field: &Field) -> Value { - // mapping from 
'https://debezium.io/documentation/reference/2.1/connectors/postgresql.html#postgresql-data-types' - let r#type = match field.data_type() { - risingwave_common::types::DataType::Boolean => "boolean", - risingwave_common::types::DataType::Int16 => "int16", - risingwave_common::types::DataType::Int32 => "int32", - risingwave_common::types::DataType::Int64 => "int64", - risingwave_common::types::DataType::Int256 => "string", - risingwave_common::types::DataType::Float32 => "float", - risingwave_common::types::DataType::Float64 => "double", - // currently, we only support handling decimal as string. - // https://debezium.io/documentation/reference/2.1/connectors/postgresql.html#postgresql-decimal-types - risingwave_common::types::DataType::Decimal => "string", - - risingwave_common::types::DataType::Varchar => "string", - - risingwave_common::types::DataType::Date => "int32", - risingwave_common::types::DataType::Time => "int64", - risingwave_common::types::DataType::Timestamp => "int64", - risingwave_common::types::DataType::Timestamptz => "string", - risingwave_common::types::DataType::Interval => "string", - - risingwave_common::types::DataType::Bytea => "bytes", - risingwave_common::types::DataType::Jsonb => "string", - risingwave_common::types::DataType::Serial => "int32", - // since the original debezium pg support HSTORE via encoded as json string by default, - // we do the same here - risingwave_common::types::DataType::Struct(_) => "string", - risingwave_common::types::DataType::List { .. 
} => "string", - }; - json!({ - "field": field.name, - "optional": true, - "type": r#type, - }) -} - -pub(crate) fn pk_to_json( - row: RowRef<'_>, - schema: &[Field], - pk_indices: &[usize], -) -> Result> { - let mut mappings = Map::with_capacity(schema.len()); - for idx in pk_indices { - let field = &schema[*idx]; - let key = field.name.clone(); - let value = datum_to_json_object(field, row.datum_at(*idx), TimestampHandlingMode::Milli) - .map_err(|e| SinkError::JsonParse(e.to_string()))?; - mappings.insert(key, value); - } - Ok(mappings) -} - -pub fn chunk_to_json(chunk: StreamChunk, schema: &Schema) -> Result> { +pub fn chunk_to_json(chunk: StreamChunk, encoder: &JsonEncoder) -> Result> { let mut records: Vec = Vec::with_capacity(chunk.capacity()); for (_, row) in chunk.rows() { - let record = Value::Object(record_to_json( - row, - &schema.fields, - TimestampHandlingMode::Milli, - )?); + let record = Value::Object(encoder.encode(row)?); records.push(record.to_string()); } Ok(records) } - -#[derive(Clone, Copy)] -pub enum TimestampHandlingMode { - Milli, - String, -} - -pub fn record_to_json( - row: RowRef<'_>, - schema: &[Field], - timestamp_handling_mode: TimestampHandlingMode, -) -> Result> { - let mut mappings = Map::with_capacity(schema.len()); - for (field, datum_ref) in schema.iter().zip_eq_fast(row.iter()) { - let key = field.name.clone(); - let value = datum_to_json_object(field, datum_ref, timestamp_handling_mode) - .map_err(|e| SinkError::JsonParse(e.to_string()))?; - mappings.insert(key, value); - } - Ok(mappings) -} - -fn datum_to_json_object( - field: &Field, - datum: DatumRef<'_>, - timestamp_handling_mode: TimestampHandlingMode, -) -> ArrayResult { - let scalar_ref = match datum { - None => return Ok(Value::Null), - Some(datum) => datum, - }; - - let data_type = field.data_type(); - - tracing::debug!("datum_to_json_object: {:?}, {:?}", data_type, scalar_ref); - - let value = match (data_type, scalar_ref) { - (DataType::Boolean, 
ScalarRefImpl::Bool(v)) => { - json!(v) - } - (DataType::Int16, ScalarRefImpl::Int16(v)) => { - json!(v) - } - (DataType::Int32, ScalarRefImpl::Int32(v)) => { - json!(v) - } - (DataType::Int64, ScalarRefImpl::Int64(v)) => { - json!(v) - } - (DataType::Float32, ScalarRefImpl::Float32(v)) => { - json!(f32::from(v)) - } - (DataType::Float64, ScalarRefImpl::Float64(v)) => { - json!(f64::from(v)) - } - (DataType::Varchar, ScalarRefImpl::Utf8(v)) => { - json!(v) - } - (DataType::Decimal, ScalarRefImpl::Decimal(v)) => { - json!(v.to_text()) - } - (DataType::Timestamptz, ScalarRefImpl::Timestamptz(v)) => { - // risingwave's timestamp with timezone is stored in UTC and does not maintain the - // timezone info and the time is in microsecond. - let parsed = v.to_datetime_utc().naive_utc(); - let v = parsed.format("%Y-%m-%d %H:%M:%S%.6f").to_string(); - json!(v) - } - (DataType::Time, ScalarRefImpl::Time(v)) => { - // todo: just ignore the nanos part to avoid leap second complex - json!(v.0.num_seconds_from_midnight() as i64 * 1000) - } - (DataType::Date, ScalarRefImpl::Date(v)) => { - json!(v.0.num_days_from_ce()) - } - (DataType::Timestamp, ScalarRefImpl::Timestamp(v)) => match timestamp_handling_mode { - TimestampHandlingMode::Milli => json!(v.0.timestamp_millis()), - TimestampHandlingMode::String => json!(v.0.format("%Y-%m-%d %H:%M:%S%.6f").to_string()), - }, - (DataType::Bytea, ScalarRefImpl::Bytea(v)) => { - json!(general_purpose::STANDARD_NO_PAD.encode(v)) - } - // PYMDTHMS - (DataType::Interval, ScalarRefImpl::Interval(v)) => { - json!(v.as_iso_8601()) - } - (DataType::Jsonb, ScalarRefImpl::Jsonb(jsonb_ref)) => { - json!(jsonb_ref.to_string()) - } - (DataType::List(datatype), ScalarRefImpl::List(list_ref)) => { - let elems = list_ref.iter(); - let mut vec = Vec::with_capacity(elems.len()); - let inner_field = Field::unnamed(Box::::into_inner(datatype)); - for sub_datum_ref in elems { - let value = - datum_to_json_object(&inner_field, sub_datum_ref, 
timestamp_handling_mode)?; - vec.push(value); - } - json!(vec) - } - (DataType::Struct(st), ScalarRefImpl::Struct(struct_ref)) => { - let mut map = Map::with_capacity(st.len()); - for (sub_datum_ref, sub_field) in struct_ref.iter_fields_ref().zip_eq_debug( - st.iter() - .map(|(name, dt)| Field::with_name(dt.clone(), name)), - ) { - let value = - datum_to_json_object(&sub_field, sub_datum_ref, timestamp_handling_mode)?; - map.insert(sub_field.name.clone(), value); - } - json!(map) - } - (data_type, scalar_ref) => { - return Err(ArrayError::internal( - format!("datum_to_json_object: unsupported data type: field name: {:?}, logical type: {:?}, physical type: {:?}", field.name, data_type, scalar_ref), - )); - } - }; - - Ok(value) -} - -#[derive(Debug, Clone, Default)] -pub struct UpsertAdapterOpts {} - -#[try_stream(ok = (Option, Option), error = SinkError)] -pub async fn gen_upsert_message_stream<'a>( - schema: &'a Schema, - pk_indices: &'a [usize], - chunk: StreamChunk, - _opts: UpsertAdapterOpts, -) { - for (op, row) in chunk.rows() { - let event_key_object = Some(Value::Object(pk_to_json(row, &schema.fields, pk_indices)?)); - - let event_object = match op { - Op::Insert => Some(Value::Object(record_to_json( - row, - &schema.fields, - TimestampHandlingMode::Milli, - )?)), - Op::Delete => Some(Value::Null), - Op::UpdateDelete => { - // upsert semantic does not require update delete event - continue; - } - Op::UpdateInsert => Some(Value::Object(record_to_json( - row, - &schema.fields, - TimestampHandlingMode::Milli, - )?)), - }; - - yield (event_key_object, event_object); - } -} - -#[derive(Debug, Clone, Default)] -pub struct AppendOnlyAdapterOpts {} - -#[try_stream(ok = (Option, Option), error = SinkError)] -pub async fn gen_append_only_message_stream<'a>( - schema: &'a Schema, - pk_indices: &'a [usize], - chunk: StreamChunk, - _opts: AppendOnlyAdapterOpts, -) { - for (op, row) in chunk.rows() { - if op != Op::Insert { - continue; - } - let event_key_object = 
Some(Value::Object(pk_to_json(row, &schema.fields, pk_indices)?)); - let event_object = Some(Value::Object(record_to_json( - row, - &schema.fields, - TimestampHandlingMode::Milli, - )?)); - - yield (event_key_object, event_object); - } -} - -#[cfg(test)] -mod tests { - - use risingwave_common::types::{DataType, Interval, ScalarImpl, Time, Timestamp}; - - use super::*; - #[test] - fn test_to_json_basic_type() { - let mock_field = Field { - data_type: DataType::Boolean, - name: Default::default(), - sub_fields: Default::default(), - type_name: Default::default(), - }; - let boolean_value = datum_to_json_object( - &Field { - data_type: DataType::Boolean, - ..mock_field.clone() - }, - Some(ScalarImpl::Bool(false).as_scalar_ref_impl()), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(boolean_value, json!(false)); - - let int16_value = datum_to_json_object( - &Field { - data_type: DataType::Int16, - ..mock_field.clone() - }, - Some(ScalarImpl::Int16(16).as_scalar_ref_impl()), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(int16_value, json!(16)); - - let int64_value = datum_to_json_object( - &Field { - data_type: DataType::Int64, - ..mock_field.clone() - }, - Some(ScalarImpl::Int64(std::i64::MAX).as_scalar_ref_impl()), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!( - serde_json::to_string(&int64_value).unwrap(), - std::i64::MAX.to_string() - ); - - // https://github.com/debezium/debezium/blob/main/debezium-core/src/main/java/io/debezium/time/ZonedTimestamp.java - let tstz_inner = "2018-01-26T18:30:09.453Z".parse().unwrap(); - let tstz_value = datum_to_json_object( - &Field { - data_type: DataType::Timestamptz, - ..mock_field.clone() - }, - Some(ScalarImpl::Timestamptz(tstz_inner).as_scalar_ref_impl()), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(tstz_value, "2018-01-26 18:30:09.453000"); - - let ts_value = datum_to_json_object( - &Field { - data_type: DataType::Timestamp, - ..mock_field.clone() - }, - 
Some( - ScalarImpl::Timestamp(Timestamp::from_timestamp_uncheck(1000, 0)) - .as_scalar_ref_impl(), - ), - TimestampHandlingMode::Milli, - ) - .unwrap(); - assert_eq!(ts_value, json!(1000 * 1000)); - - let ts_value = datum_to_json_object( - &Field { - data_type: DataType::Timestamp, - ..mock_field.clone() - }, - Some( - ScalarImpl::Timestamp(Timestamp::from_timestamp_uncheck(1000, 0)) - .as_scalar_ref_impl(), - ), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(ts_value, json!("1970-01-01 00:16:40.000000".to_string())); - - // Represents the number of microseconds past midnigh, io.debezium.time.Time - let time_value = datum_to_json_object( - &Field { - data_type: DataType::Time, - ..mock_field.clone() - }, - Some( - ScalarImpl::Time(Time::from_num_seconds_from_midnight_uncheck(1000, 0)) - .as_scalar_ref_impl(), - ), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(time_value, json!(1000 * 1000)); - - let interval_value = datum_to_json_object( - &Field { - data_type: DataType::Interval, - ..mock_field - }, - Some( - ScalarImpl::Interval(Interval::from_month_day_usec(13, 2, 1000000)) - .as_scalar_ref_impl(), - ), - TimestampHandlingMode::String, - ) - .unwrap(); - assert_eq!(interval_value, json!("P1Y1M2DT0H0M1S")); - } -} diff --git a/src/connector/src/sink/writer.rs b/src/connector/src/sink/writer.rs new file mode 100644 index 0000000000000..64261bb42ab48 --- /dev/null +++ b/src/connector/src/sink/writer.rs @@ -0,0 +1,289 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::future::{Future, Ready}; +use std::pin::pin; +use std::sync::Arc; +use std::time::Instant; + +use async_trait::async_trait; +use futures::future::{select, Either}; +use futures::TryFuture; +use risingwave_common::array::StreamChunk; +use risingwave_common::buffer::Bitmap; +use risingwave_common::util::drop_either_future; + +use crate::sink::encoder::SerTo; +use crate::sink::formatter::SinkFormatter; +use crate::sink::log_store::{ + DeliveryFutureManager, DeliveryFutureManagerAddFuture, LogReader, LogStoreReadItem, + TruncateOffset, +}; +use crate::sink::{LogSinker, Result, SinkError, SinkMetrics}; + +#[async_trait] +pub trait SinkWriter: Send + 'static { + type CommitMetadata: Send = (); + /// Begin a new epoch + async fn begin_epoch(&mut self, epoch: u64) -> Result<()>; + + /// Write a stream chunk to sink + async fn write_batch(&mut self, chunk: StreamChunk) -> Result<()>; + + /// Receive a barrier and mark the end of current epoch. When `is_checkpoint` is true, the sink + /// writer should commit the current epoch. + async fn barrier(&mut self, is_checkpoint: bool) -> Result; + + /// Clean up + async fn abort(&mut self) -> Result<()> { + Ok(()) + } + + /// Update the vnode bitmap of current sink writer + async fn update_vnode_bitmap(&mut self, _vnode_bitmap: Arc) -> Result<()> { + Ok(()) + } +} + +pub type DummyDeliveryFuture = Ready>; + +pub trait AsyncTruncateSinkWriter: Send + 'static { + type DeliveryFuture: TryFuture + Unpin + Send + 'static = + DummyDeliveryFuture; + + fn write_chunk<'a>( + &'a mut self, + chunk: StreamChunk, + add_future: DeliveryFutureManagerAddFuture<'a, Self::DeliveryFuture>, + ) -> impl Future> + Send + 'a; +} + +/// A free-form sink that may output in multiple formats and encodings. Examples include kafka, +/// kinesis, nats and redis. 
+/// +/// The implementor specifies required key & value type (likely string or bytes), as well as how to +/// write a single pair. The provided `write_chunk` method would handle the interaction with a +/// `SinkFormatter`. +/// +/// Currently kafka takes `&mut self` while kinesis takes `&self`. So we use `&mut self` in trait +/// but implement it for `&Kinesis`. This allows us to hold `&mut &Kinesis` and `&Kinesis` +/// simultaneously, preventing the schema clone issue propagating from kafka to kinesis. +pub trait FormattedSink { + type K; + type V; + async fn write_one(&mut self, k: Option, v: Option) -> Result<()>; + + async fn write_chunk( + &mut self, + chunk: StreamChunk, + formatter: &F, + ) -> Result<()> + where + F::K: SerTo, + F::V: SerTo, + { + for r in formatter.format_chunk(&chunk) { + let (event_key_object, event_object) = r?; + + self.write_one( + event_key_object.map(SerTo::ser_to).transpose()?, + event_object.map(SerTo::ser_to).transpose()?, + ) + .await?; + } + + Ok(()) + } +} + +pub struct LogSinkerOf { + writer: W, + sink_metrics: SinkMetrics, +} + +impl LogSinkerOf { + pub fn new(writer: W, sink_metrics: SinkMetrics) -> Self { + LogSinkerOf { + writer, + sink_metrics, + } + } +} + +#[async_trait] +impl> LogSinker for LogSinkerOf { + async fn consume_log_and_sink(self, mut log_reader: impl LogReader) -> Result<()> { + let mut sink_writer = self.writer; + let sink_metrics = self.sink_metrics; + #[derive(Debug)] + enum LogConsumerState { + /// Mark that the log consumer is not initialized yet + Uninitialized, + + /// Mark that a new epoch has begun. 
+ EpochBegun { curr_epoch: u64 }, + + /// Mark that the consumer has just received a barrier + BarrierReceived { prev_epoch: u64 }, + } + + let mut state = LogConsumerState::Uninitialized; + + log_reader.init().await?; + + loop { + let (epoch, item): (u64, LogStoreReadItem) = log_reader.next_item().await?; + if let LogStoreReadItem::UpdateVnodeBitmap(_) = &item { + match &state { + LogConsumerState::BarrierReceived { .. } => {} + _ => unreachable!( + "update vnode bitmap can be accepted only right after \ + barrier, but current state is {:?}", + state + ), + } + } + // begin_epoch when not previously began + state = match state { + LogConsumerState::Uninitialized => { + sink_writer.begin_epoch(epoch).await?; + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + LogConsumerState::EpochBegun { curr_epoch } => { + assert!( + epoch >= curr_epoch, + "new epoch {} should not be below the current epoch {}", + epoch, + curr_epoch + ); + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + LogConsumerState::BarrierReceived { prev_epoch } => { + assert!( + epoch > prev_epoch, + "new epoch {} should be greater than prev epoch {}", + epoch, + prev_epoch + ); + sink_writer.begin_epoch(epoch).await?; + LogConsumerState::EpochBegun { curr_epoch: epoch } + } + }; + match item { + LogStoreReadItem::StreamChunk { chunk, .. 
} => { + if let Err(e) = sink_writer.write_batch(chunk).await { + sink_writer.abort().await?; + return Err(e); + } + } + LogStoreReadItem::Barrier { is_checkpoint } => { + let prev_epoch = match state { + LogConsumerState::EpochBegun { curr_epoch } => curr_epoch, + _ => unreachable!("epoch must have begun before handling barrier"), + }; + if is_checkpoint { + let start_time = Instant::now(); + sink_writer.barrier(true).await?; + sink_metrics + .sink_commit_duration_metrics + .observe(start_time.elapsed().as_millis() as f64); + log_reader + .truncate(TruncateOffset::Barrier { epoch }) + .await?; + } else { + sink_writer.barrier(false).await?; + } + state = LogConsumerState::BarrierReceived { prev_epoch } + } + LogStoreReadItem::UpdateVnodeBitmap(vnode_bitmap) => { + sink_writer.update_vnode_bitmap(vnode_bitmap).await?; + } + } + } + } +} + +#[easy_ext::ext(SinkWriterExt)] +impl T +where + T: SinkWriter + Sized, +{ + pub fn into_log_sinker(self, sink_metrics: SinkMetrics) -> LogSinkerOf { + LogSinkerOf { + writer: self, + sink_metrics, + } + } +} + +pub struct AsyncTruncateLogSinkerOf { + writer: W, + future_manager: DeliveryFutureManager, +} + +impl AsyncTruncateLogSinkerOf { + pub fn new(writer: W, max_future_count: usize) -> Self { + AsyncTruncateLogSinkerOf { + writer, + future_manager: DeliveryFutureManager::new(max_future_count), + } + } +} + +#[async_trait] +impl LogSinker for AsyncTruncateLogSinkerOf { + async fn consume_log_and_sink(mut self, mut log_reader: impl LogReader) -> Result<()> { + log_reader.init().await?; + loop { + let select_result = drop_either_future( + select( + pin!(log_reader.next_item()), + pin!(self.future_manager.next_truncate_offset()), + ) + .await, + ); + match select_result { + Either::Left(item_result) => { + let (epoch, item) = item_result?; + match item { + LogStoreReadItem::StreamChunk { chunk_id, chunk } => { + let add_future = self.future_manager.start_write_chunk(epoch, chunk_id); + self.writer.write_chunk(chunk, 
add_future).await?; + } + LogStoreReadItem::Barrier { + is_checkpoint: _is_checkpoint, + } => { + self.future_manager.add_barrier(epoch); + } + LogStoreReadItem::UpdateVnodeBitmap(_) => {} + } + } + Either::Right(offset_result) => { + let offset = offset_result?; + log_reader.truncate(offset).await?; + } + } + } + } +} + +#[easy_ext::ext(AsyncTruncateSinkWriterExt)] +impl T +where + T: AsyncTruncateSinkWriter + Sized, +{ + pub fn into_log_sinker(self, max_future_count: usize) -> AsyncTruncateLogSinkerOf { + AsyncTruncateLogSinkerOf::new(self, max_future_count) + } +} diff --git a/src/connector/src/source/base.rs b/src/connector/src/source/base.rs index 5a94b36a6b8e8..6a8cd12ce9fac 100644 --- a/src/connector/src/source/base.rs +++ b/src/connector/src/source/base.rs @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use anyhow::{anyhow, Result}; use async_trait::async_trait; +use aws_sdk_s3::types::Object; use bytes::Bytes; use enum_as_inner::EnumAsInner; use futures::stream::BoxStream; @@ -27,61 +28,67 @@ use risingwave_common::array::StreamChunk; use risingwave_common::catalog::TableId; use risingwave_common::error::{ErrorSuppressor, RwError}; use risingwave_common::types::{JsonbVal, Scalar}; +use risingwave_pb::catalog::{PbSource, PbStreamSourceInfo}; use risingwave_pb::source::ConnectorSplit; use risingwave_rpc_client::ConnectorClient; +use serde::de::DeserializeOwned; use super::datagen::DatagenMeta; -use super::filesystem::{FsSplit, S3Properties, S3_CONNECTOR}; +use super::filesystem::FsSplit; use super::google_pubsub::GooglePubsubMeta; use super::kafka::KafkaMeta; use super::monitor::SourceMetrics; -use super::nats::enumerator::NatsSplitEnumerator; -use super::nats::source::NatsSplitReader; use super::nexmark::source::message::NexmarkMeta; use crate::parser::ParserConfig; -use 
crate::source::cdc::{ - CdcProperties, CdcSplitReader, Citus, CitusDebeziumSplitEnumerator, DebeziumCdcSplit, - DebeziumSplitEnumerator, Mysql, MysqlDebeziumSplitEnumerator, Postgres, - PostgresDebeziumSplitEnumerator, CITUS_CDC_CONNECTOR, MYSQL_CDC_CONNECTOR, - POSTGRES_CDC_CONNECTOR, -}; -use crate::source::datagen::{ - DatagenProperties, DatagenSplit, DatagenSplitEnumerator, DatagenSplitReader, DATAGEN_CONNECTOR, -}; -use crate::source::dummy_connector::DummySplitReader; -use crate::source::filesystem::{S3FileReader, S3SplitEnumerator}; -use crate::source::google_pubsub::{ - PubsubProperties, PubsubSplit, PubsubSplitEnumerator, PubsubSplitReader, - GOOGLE_PUBSUB_CONNECTOR, -}; -use crate::source::kafka::enumerator::KafkaSplitEnumerator; -use crate::source::kafka::source::KafkaSplitReader; -use crate::source::kafka::{KafkaProperties, KafkaSplit, KAFKA_CONNECTOR}; -use crate::source::kinesis::enumerator::client::KinesisSplitEnumerator; -use crate::source::kinesis::source::reader::KinesisSplitReader; -use crate::source::kinesis::split::KinesisSplit; -use crate::source::kinesis::{KinesisProperties, KINESIS_CONNECTOR}; +pub(crate) use crate::source::common::CommonSplitReader; +use crate::source::filesystem::{FsPageItem, S3Properties, S3_V2_CONNECTOR}; use crate::source::monitor::EnumeratorMetrics; -use crate::source::nats::split::NatsSplit; -use crate::source::nats::{NatsProperties, NATS_CONNECTOR}; -use crate::source::nexmark::source::reader::NexmarkSplitReader; -use crate::source::nexmark::{ - NexmarkProperties, NexmarkSplit, NexmarkSplitEnumerator, NEXMARK_CONNECTOR, +use crate::source::S3_CONNECTOR; +use crate::{ + dispatch_source_prop, dispatch_split_impl, for_all_sources, impl_connector_properties, + impl_split, match_source_name_str, }; -use crate::source::pulsar::source::reader::PulsarSplitReader; -use crate::source::pulsar::{ - PulsarProperties, PulsarSplit, PulsarSplitEnumerator, PULSAR_CONNECTOR, -}; -use crate::{impl_connector_properties, impl_split, 
impl_split_enumerator, impl_split_reader}; const SPLIT_TYPE_FIELD: &str = "split_type"; const SPLIT_INFO_FIELD: &str = "split_info"; +const UPSTREAM_SOURCE_KEY: &str = "connector"; + +pub trait TryFromHashmap: Sized { + fn try_from_hashmap(props: HashMap) -> Result; +} + +pub trait SourceProperties: TryFromHashmap + Clone { + const SOURCE_NAME: &'static str; + type Split: SplitMetaData + TryFrom + Into; + type SplitEnumerator: SplitEnumerator; + type SplitReader: SplitReader; + + fn init_from_pb_source(&mut self, _source: &PbSource) {} +} + +impl TryFromHashmap for P { + fn try_from_hashmap(props: HashMap) -> Result { + let json_value = serde_json::to_value(props).map_err(|e| anyhow!(e))?; + serde_json::from_value::

(json_value).map_err(|e| anyhow!(e.to_string())) + } +} + +pub async fn create_split_reader( + prop: P, + splits: Vec, + parser_config: ParserConfig, + source_ctx: SourceContextRef, + columns: Option>, +) -> Result { + let splits = splits.into_iter().map(P::Split::try_from).try_collect()?; + P::SplitReader::new(prop, splits, parser_config, source_ctx, columns).await +} /// [`SplitEnumerator`] fetches the split metadata from the external source service. /// NOTE: It runs in the meta server, so probably it should be moved to the `meta` crate. #[async_trait] pub trait SplitEnumerator: Sized { - type Split: SplitMetaData + Send + Sync; + type Split: SplitMetaData + Send; type Properties; async fn new(properties: Self::Properties, context: SourceEnumeratorContextRef) @@ -246,6 +253,67 @@ impl SourceStruct { } } +// Only return valid (format, encode) +pub fn extract_source_struct(info: &PbStreamSourceInfo) -> Result { + use risingwave_pb::plan_common::{PbEncodeType, PbFormatType, RowFormatType}; + + // old version meta. 
+ if let Ok(format) = info.get_row_format() { + let (format, encode) = match format { + RowFormatType::Json => (SourceFormat::Plain, SourceEncode::Json), + RowFormatType::Protobuf => (SourceFormat::Plain, SourceEncode::Protobuf), + RowFormatType::DebeziumJson => (SourceFormat::Debezium, SourceEncode::Json), + RowFormatType::Avro => (SourceFormat::Plain, SourceEncode::Avro), + RowFormatType::Maxwell => (SourceFormat::Maxwell, SourceEncode::Json), + RowFormatType::CanalJson => (SourceFormat::Canal, SourceEncode::Json), + RowFormatType::Csv => (SourceFormat::Plain, SourceEncode::Csv), + RowFormatType::Native => (SourceFormat::Native, SourceEncode::Native), + RowFormatType::DebeziumAvro => (SourceFormat::Debezium, SourceEncode::Avro), + RowFormatType::UpsertJson => (SourceFormat::Upsert, SourceEncode::Json), + RowFormatType::UpsertAvro => (SourceFormat::Upsert, SourceEncode::Avro), + RowFormatType::DebeziumMongoJson => (SourceFormat::DebeziumMongo, SourceEncode::Json), + RowFormatType::Bytes => (SourceFormat::Plain, SourceEncode::Bytes), + RowFormatType::RowUnspecified => unreachable!(), + }; + return Ok(SourceStruct::new(format, encode)); + } + let source_format = info.get_format().map_err(|e| anyhow!("{e:?}"))?; + let source_encode = info.get_row_encode().map_err(|e| anyhow!("{e:?}"))?; + let (format, encode) = match (source_format, source_encode) { + (PbFormatType::Plain, PbEncodeType::Json) => (SourceFormat::Plain, SourceEncode::Json), + (PbFormatType::Plain, PbEncodeType::Protobuf) => { + (SourceFormat::Plain, SourceEncode::Protobuf) + } + (PbFormatType::Debezium, PbEncodeType::Json) => { + (SourceFormat::Debezium, SourceEncode::Json) + } + (PbFormatType::Plain, PbEncodeType::Avro) => (SourceFormat::Plain, SourceEncode::Avro), + (PbFormatType::Maxwell, PbEncodeType::Json) => (SourceFormat::Maxwell, SourceEncode::Json), + (PbFormatType::Canal, PbEncodeType::Json) => (SourceFormat::Canal, SourceEncode::Json), + (PbFormatType::Plain, PbEncodeType::Csv) => 
(SourceFormat::Plain, SourceEncode::Csv), + (PbFormatType::Native, PbEncodeType::Native) => { + (SourceFormat::Native, SourceEncode::Native) + } + (PbFormatType::Debezium, PbEncodeType::Avro) => { + (SourceFormat::Debezium, SourceEncode::Avro) + } + (PbFormatType::Upsert, PbEncodeType::Json) => (SourceFormat::Upsert, SourceEncode::Json), + (PbFormatType::Upsert, PbEncodeType::Avro) => (SourceFormat::Upsert, SourceEncode::Avro), + (PbFormatType::DebeziumMongo, PbEncodeType::Json) => { + (SourceFormat::DebeziumMongo, SourceEncode::Json) + } + (PbFormatType::Plain, PbEncodeType::Bytes) => (SourceFormat::Plain, SourceEncode::Bytes), + (format, encode) => { + return Err(anyhow!( + "Unsupported combination of format {:?} and encode {:?}", + format, + encode + )); + } + }; + Ok(SourceStruct::new(format, encode)) +} + pub type BoxSourceStream = BoxStream<'static, Result>>; pub trait SourceWithStateStream = @@ -276,12 +344,13 @@ impl From for StreamChunkWithState { /// responsible for parsing, it is used to read messages from the outside and transform them into a /// stream of parsed [`StreamChunk`] #[async_trait] -pub trait SplitReader: Sized { +pub trait SplitReader: Sized + Send { type Properties; + type Split: SplitMetaData; async fn new( properties: Self::Properties, - state: Vec, + state: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, columns: Option>, @@ -290,41 +359,106 @@ pub trait SplitReader: Sized { fn into_stream(self) -> BoxSourceWithStateStream; } -#[derive(Clone, Debug)] -pub enum ConnectorProperties { - Kafka(Box), - Pulsar(Box), - Kinesis(Box), - Nexmark(Box), - Datagen(Box), - S3(Box), - MysqlCdc(Box>), - PostgresCdc(Box>), - CitusCdc(Box>), - GooglePubsub(Box), - Nats(Box), - Dummy(Box<()>), +for_all_sources!(impl_connector_properties); + +impl ConnectorProperties { + pub fn is_new_fs_connector_b_tree_map(props: &BTreeMap) -> bool { + props + .get(UPSTREAM_SOURCE_KEY) + .map(|s| s.eq_ignore_ascii_case(S3_V2_CONNECTOR)) + 
.unwrap_or(false) + } + + pub fn is_new_fs_connector_hash_map(props: &HashMap) -> bool { + props + .get(UPSTREAM_SOURCE_KEY) + .map(|s| s.eq_ignore_ascii_case(S3_V2_CONNECTOR)) + .unwrap_or(false) + } + + pub fn rewrite_upstream_source_key_hash_map(props: &mut HashMap) { + let connector = props.remove(UPSTREAM_SOURCE_KEY).unwrap(); + match connector.as_str() { + S3_V2_CONNECTOR => { + tracing::info!( + "using new fs source, rewrite connector from '{}' to '{}'", + S3_V2_CONNECTOR, + S3_CONNECTOR + ); + props.insert(UPSTREAM_SOURCE_KEY.to_string(), S3_CONNECTOR.to_string()); + } + _ => { + props.insert(UPSTREAM_SOURCE_KEY.to_string(), connector); + } + } + } } impl ConnectorProperties { + pub fn extract(mut props: HashMap) -> Result { + if Self::is_new_fs_connector_hash_map(&props) { + _ = props + .remove(UPSTREAM_SOURCE_KEY) + .ok_or_else(|| anyhow!("Must specify 'connector' in WITH clause"))?; + return Ok(ConnectorProperties::S3(Box::new( + S3Properties::try_from_hashmap(props)?, + ))); + } + + let connector = props + .remove(UPSTREAM_SOURCE_KEY) + .ok_or_else(|| anyhow!("Must specify 'connector' in WITH clause"))?; + match_source_name_str!( + connector.to_lowercase().as_str(), + PropType, + PropType::try_from_hashmap(props).map(ConnectorProperties::from), + |other| Err(anyhow!("connector '{}' is not supported", other)) + ) + } + + pub fn enable_split_scale_in(&self) -> bool { + // enable split scale in just for Kinesis + matches!(self, ConnectorProperties::Kinesis(_)) + } + + pub fn init_from_pb_source(&mut self, source: &PbSource) { + dispatch_source_prop!(self, prop, prop.init_from_pb_source(source)) + } + pub fn support_multiple_splits(&self) -> bool { matches!(self, ConnectorProperties::Kafka(_)) } } -#[derive(Debug, Clone, EnumAsInner, PartialEq, Hash)] -pub enum SplitImpl { - Kafka(KafkaSplit), - Pulsar(PulsarSplit), - Kinesis(KinesisSplit), - Nexmark(NexmarkSplit), - Datagen(DatagenSplit), - GooglePubsub(PubsubSplit), - MysqlCdc(DebeziumCdcSplit), - 
PostgresCdc(DebeziumCdcSplit), - CitusCdc(DebeziumCdcSplit), - Nats(NatsSplit), - S3(FsSplit), +for_all_sources!(impl_split); + +impl From<&SplitImpl> for ConnectorSplit { + fn from(split: &SplitImpl) -> Self { + dispatch_split_impl!(split, inner, SourcePropType, { + ConnectorSplit { + split_type: String::from(SourcePropType::SOURCE_NAME), + encoded_split: inner.encode_to_bytes().to_vec(), + } + }) + } +} + +impl TryFrom<&ConnectorSplit> for SplitImpl { + type Error = anyhow::Error; + + fn try_from(split: &ConnectorSplit) -> std::result::Result { + match_source_name_str!( + split.split_type.to_lowercase().as_str(), + PropType, + { + ::Split::restore_from_bytes( + split.encoded_split.as_ref(), + ) + .map(Into::into) + }, + |other| Err(anyhow!("connector '{}' is not supported", other)) + ) + } } // for the `FsSourceExecutor` @@ -345,87 +479,68 @@ impl SplitImpl { } } -pub enum SplitReaderImpl { - S3(Box), - Dummy(Box), - Kinesis(Box), - Kafka(Box), - Nexmark(Box), - Pulsar(Box), - Datagen(Box), - MysqlCdc(Box>), - PostgresCdc(Box>), - CitusCdc(Box>), - GooglePubsub(Box), - Nats(Box), -} - -pub enum SplitEnumeratorImpl { - Kafka(KafkaSplitEnumerator), - Pulsar(PulsarSplitEnumerator), - Kinesis(KinesisSplitEnumerator), - Nexmark(NexmarkSplitEnumerator), - Datagen(DatagenSplitEnumerator), - MysqlCdc(MysqlDebeziumSplitEnumerator), - PostgresCdc(PostgresDebeziumSplitEnumerator), - CitusCdc(CitusDebeziumSplitEnumerator), - GooglePubsub(PubsubSplitEnumerator), - S3(S3SplitEnumerator), - Nats(NatsSplitEnumerator), -} - -impl_connector_properties! { - { Kafka, KAFKA_CONNECTOR }, - { Pulsar, PULSAR_CONNECTOR }, - { Kinesis, KINESIS_CONNECTOR }, - { Nexmark, NEXMARK_CONNECTOR }, - { Datagen, DATAGEN_CONNECTOR }, - { S3, S3_CONNECTOR }, - { GooglePubsub, GOOGLE_PUBSUB_CONNECTOR}, - { Nats, NATS_CONNECTOR } -} - -impl_split_enumerator! 
{ - { Kafka, KafkaSplitEnumerator }, - { Pulsar, PulsarSplitEnumerator }, - { Kinesis, KinesisSplitEnumerator }, - { Nexmark, NexmarkSplitEnumerator }, - { Datagen, DatagenSplitEnumerator }, - { MysqlCdc, DebeziumSplitEnumerator }, - { PostgresCdc, DebeziumSplitEnumerator }, - { CitusCdc, DebeziumSplitEnumerator }, - { GooglePubsub, PubsubSplitEnumerator}, - { S3, S3SplitEnumerator }, - { Nats, NatsSplitEnumerator } -} - -impl_split! { - { Kafka, KAFKA_CONNECTOR, KafkaSplit }, - { Pulsar, PULSAR_CONNECTOR, PulsarSplit }, - { Kinesis, KINESIS_CONNECTOR, KinesisSplit }, - { Nexmark, NEXMARK_CONNECTOR, NexmarkSplit }, - { Datagen, DATAGEN_CONNECTOR, DatagenSplit }, - { GooglePubsub, GOOGLE_PUBSUB_CONNECTOR, PubsubSplit }, - { MysqlCdc, MYSQL_CDC_CONNECTOR, DebeziumCdcSplit }, - { PostgresCdc, POSTGRES_CDC_CONNECTOR, DebeziumCdcSplit }, - { CitusCdc, CITUS_CDC_CONNECTOR, DebeziumCdcSplit }, - { S3, S3_CONNECTOR, FsSplit }, - { Nats, NATS_CONNECTOR, NatsSplit } -} - -impl_split_reader! { - { S3, S3FileReader }, - { Kafka, KafkaSplitReader }, - { Pulsar, PulsarSplitReader }, - { Kinesis, KinesisSplitReader }, - { Nexmark, NexmarkSplitReader }, - { Datagen, DatagenSplitReader }, - { MysqlCdc, CdcSplitReader}, - { PostgresCdc, CdcSplitReader}, - { CitusCdc, CdcSplitReader }, - { GooglePubsub, PubsubSplitReader }, - { Nats, NatsSplitReader }, - { Dummy, DummySplitReader } +impl SplitImpl { + fn restore_from_json_inner(split_type: &str, value: JsonbVal) -> Result { + match_source_name_str!( + split_type.to_lowercase().as_str(), + PropType, + ::Split::restore_from_json(value).map(Into::into), + |other| Err(anyhow!("connector '{}' is not supported", other)) + ) + } +} + +impl SplitMetaData for SplitImpl { + fn id(&self) -> SplitId { + dispatch_split_impl!(self, inner, IgnoreType, inner.id()) + } + + fn encode_to_json(&self) -> JsonbVal { + use serde_json::json; + let inner = self.encode_to_json_inner().take(); + json!({ SPLIT_TYPE_FIELD: self.get_type(), SPLIT_INFO_FIELD: 
inner}).into() + } + + fn restore_from_json(value: JsonbVal) -> Result { + let mut value = value.take(); + let json_obj = value.as_object_mut().unwrap(); + let split_type = json_obj + .remove(SPLIT_TYPE_FIELD) + .unwrap() + .as_str() + .unwrap() + .to_string(); + let inner_value = json_obj.remove(SPLIT_INFO_FIELD).unwrap(); + Self::restore_from_json_inner(&split_type, inner_value.into()) + } + + fn update_with_offset(&mut self, start_offset: String) -> Result<()> { + dispatch_split_impl!( + self, + inner, + IgnoreType, + inner.update_with_offset(start_offset) + ) + } +} + +impl SplitImpl { + pub fn get_type(&self) -> String { + dispatch_split_impl!(self, _ignored, PropType, { + PropType::SOURCE_NAME.to_string() + }) + } + + pub fn update_in_place(&mut self, start_offset: String) -> Result<()> { + dispatch_split_impl!(self, inner, IgnoreType, { + inner.update_with_offset(start_offset)? + }); + Ok(()) + } + + pub fn encode_to_json_inner(&self) -> JsonbVal { + dispatch_split_impl!(self, inner, IgnoreType, inner.encode_to_json()) + } } pub type DataType = risingwave_common::types::DataType; @@ -486,21 +601,34 @@ pub trait SplitMetaData: Sized { fn encode_to_json(&self) -> JsonbVal; fn restore_from_json(value: JsonbVal) -> Result; + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()>; } /// [`ConnectorState`] maintains the consuming splits' info. In specific split readers, /// `ConnectorState` cannot be [`None`] and contains one(for mq split readers) or many(for fs /// split readers) [`SplitImpl`]. If no split is assigned to source executor, `ConnectorState` is -/// [`None`] and [`DummySplitReader`] is up instead of other split readers. +/// [`None`] and the created source stream will be a pending stream. 
pub type ConnectorState = Option>; +#[derive(Debug, Clone, Default)] +pub struct FsFilterCtrlCtx; +pub type FsFilterCtrlCtxRef = Arc; + +#[async_trait] +pub trait FsListInner: Sized { + // fixme: better to implement as an Iterator, but the last page still have some contents + async fn get_next_page From<&'a Object>>(&mut self) -> Result<(Vec, bool)>; + fn filter_policy(&self, ctx: &FsFilterCtrlCtx, page_num: usize, item: &FsPageItem) -> bool; +} + #[cfg(test)] mod tests { use maplit::*; use nexmark::event::EventType; use super::*; - use crate::source::cdc::MySqlCdcSplit; + use crate::source::cdc::{DebeziumCdcSplit, MySqlCdcSplit}; + use crate::source::kafka::KafkaSplit; #[test] fn test_split_impl_get_fn() -> Result<()> { diff --git a/src/connector/src/source/cdc/enumerator/mod.rs b/src/connector/src/source/cdc/enumerator/mod.rs index 1c689026e568b..e88440bc876e1 100644 --- a/src/connector/src/source/cdc/enumerator/mod.rs +++ b/src/connector/src/source/cdc/enumerator/mod.rs @@ -13,13 +13,17 @@ // limitations under the License. 
use std::marker::PhantomData; +use std::ops::Deref; use std::str::FromStr; use anyhow::anyhow; use async_trait::async_trait; use itertools::Itertools; +use jni::objects::{JByteArray, JValue, JValueOwned}; +use prost::Message; use risingwave_common::util::addr::HostAddr; -use risingwave_pb::connector_service::SourceType; +use risingwave_jni_core::jvm_runtime::JVM; +use risingwave_pb::connector_service::{SourceType, ValidateSourceRequest, ValidateSourceResponse}; use crate::source::cdc::{ CdcProperties, CdcSourceTypeTrait, CdcSplitBase, Citus, DebeziumCdcSplit, MySqlCdcSplit, Mysql, @@ -49,10 +53,6 @@ where props: CdcProperties, context: SourceEnumeratorContextRef, ) -> anyhow::Result { - let connector_client = context.connector_client.clone().ok_or_else(|| { - anyhow!("connector node endpoint not specified or unable to connect to connector node") - })?; - let server_addrs = props .props .get(DATABASE_SERVERS_KEY) @@ -69,15 +69,42 @@ where SourceType::from(T::source_type()) ); + let mut env = JVM.get_or_init()?.attach_current_thread()?; + + let validate_source_request = ValidateSourceRequest { + source_id: context.info.source_id as u64, + source_type: props.get_source_type_pb() as _, + properties: props.props, + table_schema: Some(props.table_schema), + }; + + let validate_source_request_bytes = + env.byte_array_from_slice(&Message::encode_to_vec(&validate_source_request))?; + // validate connector properties - connector_client - .validate_source_properties( - context.info.source_id as u64, - props.get_source_type_pb(), - props.props, - Some(props.table_schema), - ) - .await?; + let response = env.call_static_method( + "com/risingwave/connector/source/JniSourceValidateHandler", + "validate", + "([B)[B", + &[JValue::Object(&validate_source_request_bytes)], + )?; + + let validate_source_response_bytes = match response { + JValueOwned::Object(o) => unsafe { JByteArray::from_raw(o.into_raw()) }, + _ => unreachable!(), + }; + + let validate_source_response: 
ValidateSourceResponse = Message::decode( + risingwave_jni_core::to_guarded_slice(&validate_source_response_bytes, &mut env)? + .deref(), + )?; + + validate_source_response.error.map_or(Ok(()), |err| { + Err(anyhow!(format!( + "source cannot pass validation: {}", + err.error_message + ))) + })?; tracing::debug!("validate cdc source properties success"); Ok(Self { diff --git a/src/connector/src/source/cdc/mod.rs b/src/connector/src/source/cdc/mod.rs index 86a8b16adec02..1d795a7141e84 100644 --- a/src/connector/src/source/cdc/mod.rs +++ b/src/connector/src/source/cdc/mod.rs @@ -19,14 +19,15 @@ use std::collections::HashMap; use std::marker::PhantomData; pub use enumerator::*; -use paste::paste; +use itertools::Itertools; use risingwave_common::catalog::{ColumnDesc, Field, Schema}; +use risingwave_pb::catalog::PbSource; use risingwave_pb::connector_service::{PbSourceType, PbTableSchema, SourceType, TableSchema}; pub use source::*; pub use split::*; -use crate::impl_cdc_source_type; -use crate::source::ConnectorProperties; +use crate::source::{SourceProperties, SplitImpl, TryFromHashmap}; +use crate::{for_all_classified_sources, impl_cdc_source_type}; pub const CDC_CONNECTOR_NAME_SUFFIX: &str = "-cdc"; @@ -39,7 +40,7 @@ pub trait CdcSourceTypeTrait: Send + Sync + Clone + 'static { fn source_type() -> CdcSourceType; } -impl_cdc_source_type!({ Mysql, "mysql" }, { Postgres, "postgres" }, { Citus, "citus" }); +for_all_classified_sources!(impl_cdc_source_type); #[derive(Clone, Debug, Default)] pub struct CdcProperties { @@ -52,6 +53,53 @@ pub struct CdcProperties { pub _phantom: PhantomData, } +impl TryFromHashmap for CdcProperties { + fn try_from_hashmap(props: HashMap) -> anyhow::Result { + Ok(CdcProperties { + props, + table_schema: Default::default(), + _phantom: PhantomData, + }) + } +} + +impl SourceProperties for CdcProperties +where + DebeziumCdcSplit: TryFrom + Into, + DebeziumSplitEnumerator: ListCdcSplits, +{ + type Split = DebeziumCdcSplit; + type 
SplitEnumerator = DebeziumSplitEnumerator; + type SplitReader = CdcSplitReader; + + const SOURCE_NAME: &'static str = T::CDC_CONNECTOR_NAME; + + fn init_from_pb_source(&mut self, source: &PbSource) { + let pk_indices = source + .pk_column_ids + .iter() + .map(|&id| { + source + .columns + .iter() + .position(|col| col.column_desc.as_ref().unwrap().column_id == id) + .unwrap() as u32 + }) + .collect_vec(); + + let table_schema = PbTableSchema { + columns: source + .columns + .iter() + .flat_map(|col| &col.column_desc) + .cloned() + .collect(), + pk_indices, + }; + self.table_schema = table_schema; + } +} + impl CdcProperties { pub fn get_source_type_pb(&self) -> SourceType { SourceType::from(T::source_type()) diff --git a/src/connector/src/source/cdc/source/message.rs b/src/connector/src/source/cdc/source/message.rs index 71046df258d63..f3890377e0bc6 100644 --- a/src/connector/src/source/cdc/source/message.rs +++ b/src/connector/src/source/cdc/source/message.rs @@ -21,7 +21,11 @@ impl From for SourceMessage { fn from(message: CdcMessage) -> Self { SourceMessage { key: None, - payload: Some(message.payload.as_bytes().to_vec()), + payload: if message.payload.is_empty() { + None // heartbeat message + } else { + Some(message.payload.as_bytes().to_vec()) + }, offset: message.offset, split_id: message.partition.into(), meta: SourceMeta::Empty, diff --git a/src/connector/src/source/cdc/source/reader.rs b/src/connector/src/source/cdc/source/reader.rs index 974ec8877d2f6..7410834ce1daa 100644 --- a/src/connector/src/source/cdc/source/reader.rs +++ b/src/connector/src/source/cdc/source/reader.rs @@ -16,18 +16,22 @@ use std::str::FromStr; use anyhow::{anyhow, Result}; use async_trait::async_trait; -use futures::pin_mut; use futures_async_stream::try_stream; +use itertools::Itertools; +use jni::objects::JValue; +use prost::Message; use risingwave_common::util::addr::HostAddr; -use risingwave_pb::connector_service::GetEventStreamResponse; +use 
risingwave_jni_core::jvm_runtime::JVM; +use risingwave_jni_core::GetEventStreamJniSender; +use risingwave_pb::connector_service::{GetEventStreamRequest, GetEventStreamResponse}; +use tokio::sync::mpsc; use crate::parser::ParserConfig; use crate::source::base::SourceMessage; use crate::source::cdc::{CdcProperties, CdcSourceType, CdcSourceTypeTrait, DebeziumCdcSplit}; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SplitId, SplitImpl, SplitMetaData, - SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, SourceContextRef, + SplitId, SplitMetaData, SplitReader, }; pub struct CdcSplitReader { @@ -44,23 +48,23 @@ pub struct CdcSplitReader { source_ctx: SourceContextRef, } +const DEFAULT_CHANNEL_SIZE: usize = 16; + #[async_trait] -impl SplitReader for CdcSplitReader -where - DebeziumCdcSplit: TryFrom, -{ +impl SplitReader for CdcSplitReader { type Properties = CdcProperties; + type Split = DebeziumCdcSplit; #[allow(clippy::unused_async)] async fn new( conn_props: CdcProperties, - splits: Vec, + splits: Vec>, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, ) -> Result { assert_eq!(splits.len(), 1); - let split = DebeziumCdcSplit::::try_from(splits.into_iter().next().unwrap())?; + let split = splits.into_iter().next().unwrap(); let split_id = split.id(); match T::source_type() { CdcSourceType::Mysql | CdcSourceType::Postgres => Ok(Self { @@ -93,16 +97,9 @@ where } } -impl CommonSplitReader for CdcSplitReader -where - Self: SplitReader, -{ +impl CommonSplitReader for CdcSplitReader { #[try_stream(ok = Vec, error = anyhow::Error)] async fn into_data_stream(self) { - let cdc_client = self.source_ctx.connector_client.clone().ok_or_else(|| { - anyhow!("connector node endpoint not specified or unable to connect to connector node") - })?; - // rewrite the hostname and port for the split let mut properties = 
self.conn_props.props.clone(); @@ -121,38 +118,59 @@ where properties.insert("table.name".into(), table_name); } - let cdc_stream = cdc_client - .start_source_stream( - self.source_id, - self.conn_props.get_source_type_pb(), - self.start_offset, - properties, - self.snapshot_done, - ) - .await - .inspect_err(|err| tracing::error!("connector node start stream error: {}", err))?; - pin_mut!(cdc_stream); - #[for_await] - for event_res in cdc_stream { - match event_res { - Ok(GetEventStreamResponse { events, .. }) => { - if events.is_empty() { - continue; - } - let mut msgs = Vec::with_capacity(events.len()); - for event in events { - msgs.push(SourceMessage::from(event)); - } - yield msgs; + let (tx, mut rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + + // Force init, because we don't want to see initialization failure in the following thread. + JVM.get_or_init()?; + + let get_event_stream_request = GetEventStreamRequest { + source_id: self.source_id, + source_type: self.conn_props.get_source_type_pb() as _, + start_offset: self.start_offset.unwrap_or_default(), + properties, + snapshot_done: self.snapshot_done, + }; + + let source_id = get_event_stream_request.source_id.to_string(); + let source_type = get_event_stream_request.source_type.to_string(); + + std::thread::spawn(move || { + let mut env = JVM.get_or_init().unwrap().attach_current_thread().unwrap(); + + let get_event_stream_request_bytes = env + .byte_array_from_slice(&Message::encode_to_vec(&get_event_stream_request)) + .unwrap(); + let result = env.call_static_method( + "com/risingwave/connector/source/core/JniDbzSourceHandler", + "runJniDbzSourceThread", + "([BJ)V", + &[ + JValue::Object(&get_event_stream_request_bytes), + JValue::from(&tx as *const GetEventStreamJniSender as i64), + ], + ); + + match result { + Ok(_) => { + tracing::info!("end of jni call runJniDbzSourceThread"); } Err(e) => { - return Err(anyhow!( - "Cdc service error: code {}, msg {}", - e.code(), - e.message() - )) + 
tracing::error!("jni call error: {:?}", e); } } + }); + + while let Some(GetEventStreamResponse { events, .. }) = rx.recv().await { + tracing::trace!("receive events {:?}", events.len()); + self.source_ctx + .metrics + .connector_source_rows_received + .with_label_values(&[&source_type, &source_id]) + .inc_by(events.len() as u64); + let msgs = events.into_iter().map(SourceMessage::from).collect_vec(); + yield msgs; } + + Err(anyhow!("all senders are dropped"))?; } } diff --git a/src/connector/src/source/cdc/split.rs b/src/connector/src/source/cdc/split.rs index 9d13a3c5a6eac..1041f8adec692 100644 --- a/src/connector/src/source/cdc/split.rs +++ b/src/connector/src/source/cdc/split.rs @@ -74,12 +74,17 @@ impl MySqlCdcSplit { self.inner.split_id ) })?; - snapshot_done = match dbz_offset.source_offset.snapshot { - Some(val) => !val, - None => true, - }; + + // heartbeat event should not update the `snapshot_done` flag + if !dbz_offset.is_heartbeat { + snapshot_done = match dbz_offset.source_offset.snapshot { + Some(val) => !val, + None => true, + }; + } } self.inner.start_offset = Some(start_offset); + // if snapshot_done is already true, it won't be updated self.inner.snapshot_done = snapshot_done; Ok(()) } @@ -109,10 +114,14 @@ impl PostgresCdcSplit { self.inner.split_id ) })?; - snapshot_done = dbz_offset - .source_offset - .last_snapshot_record - .unwrap_or(false); + + // heartbeat event should not update the `snapshot_done` flag + if !dbz_offset.is_heartbeat { + snapshot_done = dbz_offset + .source_offset + .last_snapshot_record + .unwrap_or(false); + } } self.inner.start_offset = Some(start_offset); // if snapshot_done is already true, it won't be updated @@ -132,6 +141,7 @@ pub struct DebeziumCdcSplit { impl SplitMetaData for DebeziumCdcSplit { fn id(&self) -> SplitId { + // TODO: may check T to get the specific cdc type assert!(self.mysql_split.is_some() || self.pg_split.is_some()); if let Some(split) = &self.mysql_split { return format!("{}", 
split.inner.split_id).into(); @@ -149,6 +159,17 @@ impl SplitMetaData for DebeziumCdcSplit { fn restore_from_json(value: JsonbVal) -> anyhow::Result { serde_json::from_value(value.take()).map_err(|e| anyhow!(e)) } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + // TODO: may check T to get the specific cdc type + assert!(self.mysql_split.is_some() || self.pg_split.is_some()); + if let Some(split) = &mut self.mysql_split { + split.update_with_offset(start_offset)? + } else if let Some(split) = &mut self.pg_split { + split.update_with_offset(start_offset)? + } + Ok(()) + } } impl DebeziumCdcSplit { @@ -196,14 +217,4 @@ impl DebeziumCdcSplit { } unreachable!("invalid debezium split") } - - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - assert!(self.mysql_split.is_some() || self.pg_split.is_some()); - if let Some(split) = &mut self.mysql_split { - split.update_with_offset(start_offset)? - } else if let Some(split) = &mut self.pg_split { - split.update_with_offset(start_offset)? 
- } - Ok(()) - } } diff --git a/src/connector/src/source/common.rs b/src/connector/src/source/common.rs index 02f1cbde3de38..86ad60cc1b969 100644 --- a/src/connector/src/source/common.rs +++ b/src/connector/src/source/common.rs @@ -27,7 +27,7 @@ pub(crate) trait CommonSplitReader: SplitReader + 'static { #[try_stream(boxed, ok = StreamChunkWithState, error = RwError)] pub(crate) async fn into_chunk_stream( - reader: impl CommonSplitReader + Send, + reader: impl CommonSplitReader, parser_config: ParserConfig, source_ctx: SourceContextRef, ) { diff --git a/src/connector/src/source/datagen/mod.rs b/src/connector/src/source/datagen/mod.rs index c0d9717db5366..af2dd2c388e92 100644 --- a/src/connector/src/source/datagen/mod.rs +++ b/src/connector/src/source/datagen/mod.rs @@ -24,6 +24,8 @@ use serde_with::{serde_as, DisplayFromStr}; pub use source::*; pub use split::*; +use crate::source::SourceProperties; + pub const DATAGEN_CONNECTOR: &str = "datagen"; #[serde_as] @@ -55,6 +57,14 @@ pub struct DatagenProperties { fields: HashMap, } +impl SourceProperties for DatagenProperties { + type Split = DatagenSplit; + type SplitEnumerator = DatagenSplitEnumerator; + type SplitReader = DatagenSplitReader; + + const SOURCE_NAME: &'static str = DATAGEN_CONNECTOR; +} + fn default_rows_per_second() -> u64 { 10 } diff --git a/src/connector/src/source/datagen/source/generator.rs b/src/connector/src/source/datagen/source/generator.rs index c073403a7565d..3e42b07ce4422 100644 --- a/src/connector/src/source/datagen/source/generator.rs +++ b/src/connector/src/source/datagen/source/generator.rs @@ -23,10 +23,8 @@ use risingwave_common::row::OwnedRow; use risingwave_common::types::DataType; use risingwave_common::util::iter_util::ZipEqFast; -use crate::source::{ - SourceEncode, SourceFormat, SourceMessage, SourceMeta, SourceStruct, SplitId, - StreamChunkWithState, -}; +use crate::parser::{EncodingProperties, ProtocolProperties, SpecificParserConfig}; +use crate::source::{SourceMessage, 
SourceMeta, SplitId, StreamChunkWithState}; pub enum FieldDesc { // field is invisible, generate None @@ -38,7 +36,7 @@ pub struct DatagenEventGenerator { // fields_map: HashMap, field_names: Vec, fields_vec: Vec, - source_struct: SourceStruct, + source_format: SpecificParserConfig, data_types: Vec, offset: u64, split_id: SplitId, @@ -56,7 +54,7 @@ impl DatagenEventGenerator { pub fn new( fields_vec: Vec, field_names: Vec, - source_struct: SourceStruct, + source_format: SpecificParserConfig, data_types: Vec, rows_per_second: u64, offset: u64, @@ -72,7 +70,7 @@ impl DatagenEventGenerator { Ok(Self { field_names, fields_vec, - source_struct, + source_format, data_types, offset, split_id, @@ -96,8 +94,11 @@ impl DatagenEventGenerator { ); let mut msgs = Vec::with_capacity(num_rows_to_generate as usize); 'outer: for _ in 0..num_rows_to_generate { - let payload = match (self.source_struct.format, self.source_struct.encode) { - (SourceFormat::Plain, SourceEncode::Json) => { + let payload = match ( + &self.source_format.protocol_config, + &self.source_format.encoding_config, + ) { + (ProtocolProperties::Plain, EncodingProperties::Json(_)) => { let mut map = serde_json::Map::with_capacity(self.fields_vec.len()); for (name, field_generator) in self .field_names @@ -225,7 +226,6 @@ mod tests { use futures::stream::StreamExt; use super::*; - use crate::source::SourceEncode; async fn check_sequence_partition_result( split_num: u64, @@ -266,7 +266,13 @@ mod tests { let generator = DatagenEventGenerator::new( fields_vec, vec!["c1".to_owned(), "c2".to_owned()], - SourceStruct::new(SourceFormat::Plain, SourceEncode::Json), + SpecificParserConfig { + protocol_config: ProtocolProperties::Plain, + encoding_config: EncodingProperties::Json(crate::parser::JsonProperties { + use_schema_registry: false, + }), + key_encoding_config: None, + }, data_types, rows_per_second, 0, diff --git a/src/connector/src/source/datagen/source/reader.rs b/src/connector/src/source/datagen/source/reader.rs 
index 30d1bfae10c4e..bd9f74ee3aa9a 100644 --- a/src/connector/src/source/datagen/source/reader.rs +++ b/src/connector/src/source/datagen/source/reader.rs @@ -21,13 +21,12 @@ use risingwave_common::field_generator::{FieldGeneratorImpl, VarcharProperty}; use super::generator::DatagenEventGenerator; use crate::parser::{EncodingProperties, ParserConfig, ProtocolProperties}; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; use crate::source::data_gen_util::spawn_data_generation_stream; use crate::source::datagen::source::SEQUENCE_FIELD_KIND; use crate::source::datagen::{DatagenProperties, DatagenSplit, FieldDesc}; use crate::source::{ - BoxSourceWithStateStream, Column, DataType, SourceContextRef, SourceMessage, SplitId, - SplitImpl, SplitMetaData, SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, DataType, + SourceContextRef, SourceMessage, SplitId, SplitMetaData, SplitReader, }; pub struct DatagenSplitReader { @@ -42,16 +41,16 @@ pub struct DatagenSplitReader { #[async_trait] impl SplitReader for DatagenSplitReader { type Properties = DatagenProperties; + type Split = DatagenSplit; #[allow(clippy::unused_async)] async fn new( properties: DatagenProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, columns: Option>, ) -> Result { - let mut assigned_split = DatagenSplit::default(); let mut events_so_far = u64::default(); tracing::debug!("Splits for datagen found! 
{:?}", splits); @@ -59,14 +58,12 @@ impl SplitReader for DatagenSplitReader { let split = splits.into_iter().next().unwrap(); // TODO: currently, assume there's only on split in one reader let split_id = split.id(); - if let SplitImpl::Datagen(n) = split { - if let Some(s) = n.start_offset { - // start_offset in `SplitImpl` indicates the latest successfully generated - // index, so here we use start_offset+1 - events_so_far = s + 1; - }; - assigned_split = n; - } + let assigned_split = split; + if let Some(s) = assigned_split.start_offset { + // start_offset in `SplitImpl` indicates the latest successfully generated + // index, so here we use start_offset+1 + events_so_far = s + 1; + }; let split_index = assigned_split.split_index as u64; let split_num = assigned_split.split_num as u64; @@ -123,7 +120,7 @@ impl SplitReader for DatagenSplitReader { let generator = DatagenEventGenerator::new( fields_vec, field_names, - parser_config.specific.get_source_struct(), + parser_config.specific.clone(), data_types, rows_per_second, events_so_far, @@ -180,7 +177,7 @@ impl CommonSplitReader for DatagenSplitReader { fn into_data_stream(self) -> impl Stream, anyhow::Error>> { // Will buffer at most 4 event chunks. 
const BUFFER_SIZE: usize = 4; - spawn_data_generation_stream(self.generator.into_msg_stream(), BUFFER_SIZE).boxed() + spawn_data_generation_stream(self.generator.into_msg_stream(), BUFFER_SIZE) } } @@ -346,11 +343,11 @@ mod tests { is_visible: true, }, ]; - let state = vec![SplitImpl::Datagen(DatagenSplit { + let state = vec![DatagenSplit { split_index: 0, split_num: 1, start_offset: None, - })]; + }]; let properties = DatagenProperties { split_num: None, rows_per_second: 10, @@ -424,11 +421,11 @@ mod tests { is_visible: true, }, ]; - let state = vec![SplitImpl::Datagen(DatagenSplit { + let state = vec![DatagenSplit { split_index: 0, split_num: 1, start_offset: None, - })]; + }]; let properties = DatagenProperties { split_num: None, rows_per_second: 10, @@ -454,11 +451,11 @@ mod tests { let v1 = stream.skip(1).next().await.unwrap()?; - let state = vec![SplitImpl::Datagen(DatagenSplit { + let state = vec![DatagenSplit { split_index: 0, split_num: 1, start_offset: Some(9), - })]; + }]; let mut stream = DatagenSplitReader::new( properties, state, diff --git a/src/connector/src/source/datagen/split.rs b/src/connector/src/source/datagen/split.rs index 08babee97fcb9..617b933728837 100644 --- a/src/connector/src/source/datagen/split.rs +++ b/src/connector/src/source/datagen/split.rs @@ -39,6 +39,11 @@ impl SplitMetaData for DatagenSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + self.start_offset = Some(start_offset.as_str().parse::().unwrap()); + Ok(()) + } } impl DatagenSplit { @@ -49,9 +54,4 @@ impl DatagenSplit { start_offset, } } - - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - self.start_offset = Some(start_offset.as_str().parse::().unwrap()); - Ok(()) - } } diff --git a/src/connector/src/source/dummy_connector.rs b/src/connector/src/source/dummy_connector.rs deleted file mode 100644 index 
3a5b8922fd29a..0000000000000 --- a/src/connector/src/source/dummy_connector.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use anyhow::Result; -use async_trait::async_trait; -use futures::StreamExt; - -use super::{SourceContextRef, SplitImpl, SplitReader}; -use crate::parser::ParserConfig; -use crate::source::{BoxSourceWithStateStream, Column}; - -/// [`DummySplitReader`] is a placeholder for source executor that is assigned no split. It will -/// wait forever when calling `next`. 
-#[derive(Clone, Debug)] -pub struct DummySplitReader; - -#[async_trait] -impl SplitReader for DummySplitReader { - type Properties = (); - - async fn new( - _properties: Self::Properties, - _state: Vec, - _parser_config: ParserConfig, - _source_ctx: SourceContextRef, - _columns: Option>, - ) -> Result { - Ok(Self {}) - } - - fn into_stream(self) -> BoxSourceWithStateStream { - futures::stream::pending().boxed() - } -} diff --git a/src/connector/src/source/external.rs b/src/connector/src/source/external.rs index 3b66a06132b26..9eff3991a4d4a 100644 --- a/src/connector/src/source/external.rs +++ b/src/connector/src/source/external.rs @@ -160,6 +160,8 @@ pub struct DebeziumOffset { pub source_partition: HashMap, #[serde(rename = "sourceOffset")] pub source_offset: DebeziumSourceOffset, + #[serde(rename = "isHeartbeat")] + pub is_heartbeat: bool, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] @@ -200,13 +202,9 @@ impl MySqlOffset { } pub trait ExternalTableReader { - type CdcOffsetFuture<'a>: Future> + Send + 'a - where - Self: 'a; - fn get_normalized_table_name(&self, table_name: &SchemaTableName) -> String; - fn current_cdc_offset(&self) -> Self::CdcOffsetFuture<'_>; + fn current_cdc_offset(&self) -> impl Future> + Send + '_; fn parse_binlog_offset(&self, offset: &str) -> ConnectorResult; @@ -248,32 +246,28 @@ pub struct ExternalTableConfig { } impl ExternalTableReader for MySqlExternalTableReader { - type CdcOffsetFuture<'a> = impl Future> + 'a; - fn get_normalized_table_name(&self, table_name: &SchemaTableName) -> String { format!("`{}`", table_name.table_name) } - fn current_cdc_offset(&self) -> Self::CdcOffsetFuture<'_> { - async move { - let mut conn = self - .pool - .get_conn() - .await - .map_err(|e| ConnectorError::Connection(anyhow!(e)))?; - - let sql = "SHOW MASTER STATUS".to_string(); - let mut rs = conn.query::(sql).await?; - let row = rs - .iter_mut() - .exactly_one() - .map_err(|e| ConnectorError::Internal(anyhow!("read binlog error: {}", 
e)))?; - - Ok(CdcOffset::MySql(MySqlOffset { - filename: row.take("File").unwrap(), - position: row.take("Position").unwrap(), - })) - } + async fn current_cdc_offset(&self) -> ConnectorResult { + let mut conn = self + .pool + .get_conn() + .await + .map_err(|e| ConnectorError::Connection(anyhow!(e)))?; + + let sql = "SHOW MASTER STATUS".to_string(); + let mut rs = conn.query::(sql).await?; + let row = rs + .iter_mut() + .exactly_one() + .map_err(|e| ConnectorError::Internal(anyhow!("read binlog error: {}", e)))?; + + Ok(CdcOffset::MySql(MySqlOffset { + filename: row.take("File").unwrap(), + position: row.take("Position").unwrap(), + })) } fn parse_binlog_offset(&self, offset: &str) -> ConnectorResult { @@ -478,8 +472,6 @@ impl MySqlExternalTableReader { } impl ExternalTableReader for ExternalTableReaderImpl { - type CdcOffsetFuture<'a> = impl Future> + 'a; - fn get_normalized_table_name(&self, table_name: &SchemaTableName) -> String { match self { ExternalTableReaderImpl::MySql(mysql) => mysql.get_normalized_table_name(table_name), @@ -487,12 +479,10 @@ impl ExternalTableReader for ExternalTableReaderImpl { } } - fn current_cdc_offset(&self) -> Self::CdcOffsetFuture<'_> { - async move { - match self { - ExternalTableReaderImpl::MySql(mysql) => mysql.current_cdc_offset().await, - ExternalTableReaderImpl::Mock(mock) => mock.current_cdc_offset().await, - } + async fn current_cdc_offset(&self) -> ConnectorResult { + match self { + ExternalTableReaderImpl::MySql(mysql) => mysql.current_cdc_offset().await, + ExternalTableReaderImpl::Mock(mock) => mock.current_cdc_offset().await, } } @@ -566,11 +556,11 @@ mod tests { #[test] fn test_mysql_binlog_offset() { - let off0_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000001", "pos": 105622, "snapshot": true } }"#; - let off1_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000007", "pos": 1062363217, 
"snapshot": true } }"#; - let off2_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000007", "pos": 659687560, "snapshot": true } }"#; - let off3_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000008", "pos": 7665875, "snapshot": true } }"#; - let off4_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000008", "pos": 7665875, "snapshot": true } }"#; + let off0_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000001", "pos": 105622, "snapshot": true }, "isHeartbeat": false }"#; + let off1_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000007", "pos": 1062363217, "snapshot": true }, "isHeartbeat": false }"#; + let off2_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000007", "pos": 659687560, "snapshot": true }, "isHeartbeat": false }"#; + let off3_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000008", "pos": 7665875, "snapshot": true }, "isHeartbeat": false }"#; + let off4_str = r#"{ "sourcePartition": { "server": "test" }, "sourceOffset": { "ts_sec": 1670876905, "file": "binlog.000008", "pos": 7665875, "snapshot": true }, "isHeartbeat": false }"#; let off0 = CdcOffset::MySql(MySqlOffset::parse_str(off0_str).unwrap()); let off1 = CdcOffset::MySql(MySqlOffset::parse_str(off1_str).unwrap()); @@ -597,8 +587,9 @@ mod tests { ColumnDesc::unnamed(ColumnId::new(3), DataType::Varchar), ColumnDesc::unnamed(ColumnId::new(4), DataType::Date), ], - pk_indices: vec![0], + downstream_pk: vec![0], sink_type: SinkType::AppendOnly, + format_desc: None, db_name: "db".into(), sink_from_name: "table".into(), }; diff --git a/src/connector/src/source/filesystem/file_common.rs 
b/src/connector/src/source/filesystem/file_common.rs index 4711a5080c5ae..85d65f40a3479 100644 --- a/src/connector/src/source/filesystem/file_common.rs +++ b/src/connector/src/source/filesystem/file_common.rs @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. use anyhow::anyhow; -use risingwave_common::types::JsonbVal; +use aws_sdk_s3::types::Object; +use risingwave_common::types::{JsonbVal, Timestamp}; use serde::{Deserialize, Serialize}; use crate::source::{SplitId, SplitMetaData}; @@ -26,6 +27,16 @@ pub struct FsSplit { pub size: usize, } +impl From<&Object> for FsSplit { + fn from(value: &Object) -> Self { + Self { + name: value.key().unwrap().to_owned(), + offset: 0, + size: value.size() as usize, + } + } +} + impl SplitMetaData for FsSplit { fn id(&self) -> SplitId { self.name.as_str().into() @@ -38,6 +49,12 @@ impl SplitMetaData for FsSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + let offset = start_offset.parse().unwrap(); + self.offset = offset; + Ok(()) + } } impl FsSplit { @@ -48,10 +65,24 @@ impl FsSplit { size, } } +} - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - let offset = start_offset.parse().unwrap(); - self.offset = offset; - Ok(()) +#[derive(Clone, Debug)] +pub struct FsPageItem { + pub name: String, + pub size: i64, + pub timestamp: Timestamp, +} + +pub type FsPage = Vec; + +impl From<&Object> for FsPageItem { + fn from(value: &Object) -> Self { + let aws_ts = value.last_modified().unwrap(); + Self { + name: value.key().unwrap().to_owned(), + size: value.size(), + timestamp: Timestamp::from_timestamp_uncheck(aws_ts.secs(), aws_ts.subsec_nanos()), + } } } diff --git a/src/connector/src/source/filesystem/mod.rs b/src/connector/src/source/filesystem/mod.rs index 729fb376ecc6e..8f2587384280b 100644 --- 
a/src/connector/src/source/filesystem/mod.rs +++ b/src/connector/src/source/filesystem/mod.rs @@ -16,5 +16,7 @@ pub use s3::{S3FileReader, S3Properties, S3SplitEnumerator, S3_CONNECTOR}; mod file_common; pub mod nd_streaming; -pub use file_common::FsSplit; +pub use file_common::{FsPage, FsPageItem, FsSplit}; mod s3; +pub mod s3_v2; +pub const S3_V2_CONNECTOR: &str = "s3_v2"; diff --git a/src/connector/src/source/filesystem/nd_streaming.rs b/src/connector/src/source/filesystem/nd_streaming.rs index e1f5e88eda01e..5574063ffc855 100644 --- a/src/connector/src/source/filesystem/nd_streaming.rs +++ b/src/connector/src/source/filesystem/nd_streaming.rs @@ -12,25 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::io::BufRead; - +use anyhow::anyhow; use bytes::BytesMut; +use futures::io::Cursor; +use futures::AsyncBufReadExt; use futures_async_stream::try_stream; use crate::source::{BoxSourceStream, SourceMessage}; #[try_stream(boxed, ok = Vec, error = anyhow::Error)] -/// This function splits a byte stream by the newline character '\n' into a message stream. +/// This function splits a byte stream by the newline separator "(\r)\n" into a message stream. /// It can be difficult to split and compute offsets correctly when the bytes are received in /// chunks. There are two cases to consider: -/// - When a bytes chunk does not end with '\n', we should not treat the last segment as a new line -/// message, but keep it for the next chunk, and insert it before next chunk's first line -/// beginning. -/// - When a bytes chunk ends with '\n', there is no additional action required. +/// - When a bytes chunk does not end with "(\r)\n", we should not treat the last segment as a new line +/// message, but keep it for the next chunk, and prepend it to the first line of the next chunk. +/// - When a bytes chunk ends with "(\r)\n", there is no additional action required. 
pub async fn split_stream(data_stream: BoxSourceStream) { - let mut buf = BytesMut::new(); - let mut last_message = None; + #[for_await] for batch in data_stream { let batch = batch?; @@ -46,54 +45,70 @@ pub async fn split_stream(data_stream: BoxSourceStream) { .unwrap(); let mut offset: usize = offset.parse()?; - - // Never panic because we check batch is not empty - let last_item = batch.last().unwrap(); - let end_offset: usize = last_item.offset.parse::().unwrap() - + last_item - .payload - .as_ref() - .map(|p| p.len()) - .unwrap_or_default(); + let mut buf = BytesMut::new(); for msg in batch { let payload = msg.payload.unwrap_or_default(); buf.extend(payload); } let mut msgs = Vec::new(); - for (i, line) in buf.lines().enumerate() { - let mut line = line?; - - // Insert the trailing of the last chunk in front of the first line, do not count - // the length here. - if i == 0 && last_message.is_some() { - let msg: SourceMessage = std::mem::take(&mut last_message).unwrap(); - let last_payload = msg.payload.unwrap(); - offset -= last_payload.len(); - line = String::from_utf8(last_payload).unwrap() + &line; - } - let len = line.as_bytes().len(); - - msgs.push(SourceMessage { - key: None, - payload: Some(line.into()), - offset: (offset + len).to_string(), - split_id: split_id.clone(), - meta: meta.clone(), - }); - offset += len; - offset += 1; - } - if offset > end_offset { - last_message = msgs.pop(); - } + let mut cursor = Cursor::new(buf.freeze()); + let mut line_cnt: usize = 0; + loop { + let mut line = String::new(); + match cursor.read_line(&mut line).await { + Ok(0) => { + if !msgs.is_empty() { + yield msgs; + } + break; + } + Ok(_n) => { + if line_cnt == 0 && last_message.is_some() { + let msg: SourceMessage = std::mem::take(&mut last_message).unwrap(); + let last_payload = msg.payload.unwrap(); + offset -= last_payload.len(); + line.insert_str(0, &String::from_utf8(last_payload).unwrap()); + } + + let mut separator = String::with_capacity(2); + for delim in 
['\n', '\r'] { + if line.ends_with(delim) { + separator.insert(0, line.pop().unwrap()); + } else { + // If the data is batched as "XXXX\r" and "\nXXXX", + // the line will be "XXXX\r" here because the cursor reaches EOF. + // Hence we should break the delim loop here, + // otherwise the \r would be treated as separator even without \n. + break; + } + } + + let len = line.len(); + + offset += len + separator.len(); + let msg = SourceMessage { + key: None, + payload: Some(line.into()), + offset: offset.to_string(), + split_id: split_id.clone(), + meta: meta.clone(), + }; - if !msgs.is_empty() { - yield msgs; - } + msgs.push(msg); - buf.clear(); + if separator.is_empty() { + // Not ending with \n, prepend to the first line of the next batch + last_message = msgs.pop(); + } + } + Err(e) => return Err(anyhow!(e)), + } + + line_cnt += 1; + } } + if let Some(msg) = last_message { yield vec![msg]; } @@ -109,40 +124,74 @@ mod tests { #[tokio::test] async fn test_split_stream() { - const N1: usize = 10000; - const N2: usize = 500; - const N3: usize = 50; - let lines = (0..N1) - .map(|x| (0..x % N2).map(|_| 'A').collect::()) - .collect::>(); - let total_chars = lines.iter().map(|e| e.len()).sum::(); - let text = lines.join("\n").into_bytes(); - let split_id: Arc = "1".to_string().into_boxed_str().into(); - let s = text - .chunks(N2) - .enumerate() - .map(move |(i, e)| { - Ok(e.chunks(N3) - .enumerate() - .map(|(j, buf)| SourceMessage { - key: None, - payload: Some(buf.to_owned()), - offset: (i * N2 + j * N3).to_string(), - split_id: split_id.clone(), - meta: crate::source::SourceMeta::Empty, - }) - .collect::>()) - }) - .collect::>(); - let stream = futures::stream::iter(s).boxed(); - let msg_stream = split_stream(stream).try_collect::>().await.unwrap(); - let items = msg_stream - .into_iter() - .flatten() - .map(|e| String::from_utf8(e.payload.unwrap()).unwrap()) - .collect::>(); - assert_eq!(items.len(), N1); - let text = items.join(""); - assert_eq!(text.len(), 
total_chars); + // Test with tail separators. + for tail_separator in ["", "\n", "\r\n"] { + const N1: usize = 10000; + const N2: usize = 500; + const N3: usize = 50; + let lines = (0..N1) + .map(|x| (0..x % N2).map(|_| 'A').collect::()) + .collect::>(); + let total_chars = lines.iter().map(|e| e.len()).sum::(); + // Join lines with \n & \r\n alternately + let delims = ["\n", "\r\n"]; + let text = lines + .iter() + .enumerate() + .skip(1) + .fold(lines[0].clone(), |acc, (idx, now)| { + format!("{}{}{}", acc, delims[idx % 2], now) + }) + + tail_separator; + let text = text.into_bytes(); + let split_id: Arc = "1".to_string().into_boxed_str().into(); + let s = text + .chunks(N2) + .enumerate() + .map(move |(i, e)| { + Ok(e.chunks(N3) + .enumerate() + .map(|(j, buf)| SourceMessage { + key: None, + payload: Some(buf.to_owned()), + offset: (i * N2 + j * N3).to_string(), + split_id: split_id.clone(), + meta: crate::source::SourceMeta::Empty, + }) + .collect::>()) + }) + .collect::>(); + let stream = futures::stream::iter(s).boxed(); + let msg_stream = split_stream(stream).try_collect::>().await.unwrap(); + // Check the correctness of each line's offset + let mut expected_offset: usize = 0; + msg_stream + .iter() + .flatten() + .enumerate() + .for_each(|(idx, msg)| { + expected_offset += lines[idx].len() + + if idx < lines.len() - 1 { + delims[1 - idx % 2].len() + } else { + tail_separator.len() + }; + assert_eq!( + msg.offset.parse::().unwrap(), + expected_offset, + "idx = {}, tail_separator = {:?}", + idx, + tail_separator + ); + }); + let items = msg_stream + .into_iter() + .flatten() + .map(|e| String::from_utf8(e.payload.unwrap()).unwrap()) + .collect::>(); + assert_eq!(items.len(), N1); + let text = items.join(""); + assert_eq!(text.len(), total_chars); + } } } diff --git a/src/connector/src/source/filesystem/s3/enumerator.rs b/src/connector/src/source/filesystem/s3/enumerator.rs index 6ca19e673aa33..7c16c087d14c6 100644 --- 
a/src/connector/src/source/filesystem/s3/enumerator.rs +++ b/src/connector/src/source/filesystem/s3/enumerator.rs @@ -12,20 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -use anyhow::{anyhow, Context}; +use anyhow::Context; use async_trait::async_trait; use aws_sdk_s3::client::Client; -use aws_sdk_s3::error::DisplayErrorContext; -use itertools::Itertools; use crate::aws_auth::AwsAuthProps; use crate::aws_utils::{default_conn_config, s3_client}; use crate::source::filesystem::file_common::FsSplit; use crate::source::filesystem::s3::S3Properties; -use crate::source::{SourceEnumeratorContextRef, SplitEnumerator}; +use crate::source::{FsListInner, SourceEnumeratorContextRef, SplitEnumerator}; /// Get the prefix from a glob -fn get_prefix(glob: &str) -> String { +pub fn get_prefix(glob: &str) -> String { let mut escaped = false; let mut escaped_filter = false; glob.chars() @@ -59,11 +57,14 @@ fn get_prefix(glob: &str) -> String { #[derive(Debug, Clone)] pub struct S3SplitEnumerator { - bucket_name: String, + pub(crate) bucket_name: String, // prefix is used to reduce the number of objects to be listed - prefix: Option, - matcher: Option, - client: Client, + pub(crate) prefix: Option, + pub(crate) matcher: Option, + pub(crate) client: Client, + + // token get the next page, used when the current page is truncated + pub(crate) next_continuation_token: Option, } #[async_trait] @@ -92,53 +93,26 @@ impl SplitEnumerator for S3SplitEnumerator { matcher, prefix, client: s3_client, + next_continuation_token: None, }) } async fn list_splits(&mut self) -> anyhow::Result> { let mut objects = Vec::new(); - let mut next_continuation_token = None; loop { - let mut req = self - .client - .list_objects_v2() - .bucket(&self.bucket_name) - .set_prefix(self.prefix.clone()); - if let Some(continuation_token) = next_continuation_token.take() { - req = req.continuation_token(continuation_token); - } - let mut res = req - 
.send() - .await - .map_err(|e| anyhow!(DisplayErrorContext(e)))?; - objects.extend(res.contents.take().unwrap_or_default()); - if res.is_truncated() { - next_continuation_token = Some(res.next_continuation_token.unwrap()) - } else { + let (files, has_finished) = self.get_next_page::().await?; + objects.extend(files); + if has_finished { break; } } - - let matched_objs = objects - .iter() - .filter(|obj| obj.key().is_some()) - .filter(|obj| { - self.matcher - .as_ref() - .map(|m| m.matches(obj.key().unwrap())) - .unwrap_or(true) - }) - .collect_vec(); - - Ok(matched_objs - .into_iter() - .map(|obj| FsSplit::new(obj.key().unwrap().to_owned(), 0, obj.size() as usize)) - .collect_vec()) + Ok(objects) } } #[cfg(test)] mod tests { + use itertools::Itertools; #[test] fn test_get_prefix() { diff --git a/src/connector/src/source/filesystem/s3/mod.rs b/src/connector/src/source/filesystem/s3/mod.rs index 62f6bcd922a80..464a11f99fc09 100644 --- a/src/connector/src/source/filesystem/s3/mod.rs +++ b/src/connector/src/source/filesystem/s3/mod.rs @@ -19,6 +19,8 @@ use serde::Deserialize; pub use source::S3FileReader; use crate::aws_auth::AwsAuthProps; +use crate::source::filesystem::FsSplit; +use crate::source::SourceProperties; pub const S3_CONNECTOR: &str = "s3"; @@ -35,7 +37,15 @@ pub struct S3Properties { #[serde(rename = "s3.credentials.secret", default)] pub secret: Option, #[serde(rename = "s3.endpoint_url")] - endpoint_url: Option, + pub endpoint_url: Option, +} + +impl SourceProperties for S3Properties { + type Split = FsSplit; + type SplitEnumerator = S3SplitEnumerator; + type SplitReader = S3FileReader; + + const SOURCE_NAME: &'static str = S3_CONNECTOR; } impl From<&S3Properties> for AwsAuthProps { diff --git a/src/connector/src/source/filesystem/s3/source/reader.rs b/src/connector/src/source/filesystem/s3/source/reader.rs index b20822139d181..b1e368a2b409e 100644 --- a/src/connector/src/source/filesystem/s3/source/reader.rs +++ 
b/src/connector/src/source/filesystem/s3/source/reader.rs @@ -37,7 +37,7 @@ use crate::source::filesystem::file_common::FsSplit; use crate::source::filesystem::nd_streaming; use crate::source::filesystem::s3::S3Properties; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SourceMeta, SplitImpl, + BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SourceMeta, StreamChunkWithState, }; const MAX_CHANNEL_BUFFER_SIZE: usize = 2048; @@ -55,7 +55,7 @@ pub struct S3FileReader { impl S3FileReader { #[try_stream(boxed, ok = Vec, error = anyhow::Error)] - async fn stream_read_object( + pub async fn stream_read_object( client_for_s3: s3_client::Client, bucket_name: String, split: FsSplit, @@ -137,7 +137,7 @@ impl S3FileReader { } } - async fn get_object( + pub async fn get_object( client_for_s3: &s3_client::Client, bucket_name: &str, object_name: &str, @@ -164,10 +164,11 @@ impl S3FileReader { #[async_trait] impl SplitReader for S3FileReader { type Properties = S3Properties; + type Split = FsSplit; async fn new( props: S3Properties, - state: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, @@ -179,10 +180,6 @@ impl SplitReader for S3FileReader { let bucket_name = props.bucket_name; let s3_client = s3_client(&sdk_config, Some(default_conn_config())); - let splits = state - .into_iter() - .map(|split| split.into_fs().expect("not a fs split")) - .collect(); let s3_file_reader = S3FileReader { split_offset: HashMap::new(), bucket_name, @@ -272,8 +269,6 @@ mod tests { let splits = enumerator.list_splits().await.unwrap(); println!("splits {:?}", splits); - let splits = splits.into_iter().map(SplitImpl::S3).collect(); - let descs = vec![ SourceColumnDesc::simple("id", DataType::Int64, 1.into()), SourceColumnDesc::simple("name", DataType::Varchar, 2.into()), diff --git a/src/connector/src/source/filesystem/s3_v2/lister.rs b/src/connector/src/source/filesystem/s3_v2/lister.rs new 
file mode 100644 index 0000000000000..3c55f23d37f67 --- /dev/null +++ b/src/connector/src/source/filesystem/s3_v2/lister.rs @@ -0,0 +1,66 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; +use async_trait::async_trait; +use aws_sdk_s3::error::DisplayErrorContext; +use aws_sdk_s3::types::Object; +use itertools::Itertools; + +use crate::source::filesystem::{FsPageItem, S3SplitEnumerator}; +use crate::source::{FsFilterCtrlCtx, FsListInner}; + +#[async_trait] +impl FsListInner for S3SplitEnumerator { + async fn get_next_page From<&'a Object>>( + &mut self, + ) -> anyhow::Result<(Vec, bool)> { + let mut has_finished = false; + let mut req = self + .client + .list_objects_v2() + .bucket(&self.bucket_name) + .set_prefix(self.prefix.clone()); + if let Some(continuation_token) = self.next_continuation_token.take() { + req = req.continuation_token(continuation_token); + } + let mut res = req + .send() + .await + .map_err(|e| anyhow!(DisplayErrorContext(e)))?; + if res.is_truncated() { + self.next_continuation_token = res.next_continuation_token.clone(); + } else { + has_finished = true; + self.next_continuation_token = None; + } + let objects = res.contents.take().unwrap_or_default(); + let matched_objs: Vec = objects + .iter() + .filter(|obj| obj.key().is_some()) + .filter(|obj| { + self.matcher + .as_ref() + .map(|m| m.matches(obj.key().unwrap())) + .unwrap_or(true) + }) + .map(T::from) + .collect_vec(); + 
Ok((matched_objs, has_finished)) + } + + fn filter_policy(&self, _ctx: &FsFilterCtrlCtx, _page_num: usize, _item: &FsPageItem) -> bool { + true + } +} diff --git a/src/expr/src/function/window/mod.rs b/src/connector/src/source/filesystem/s3_v2/mod.rs similarity index 94% rename from src/expr/src/function/window/mod.rs rename to src/connector/src/source/filesystem/s3_v2/mod.rs index add145718c948..6fab862daca1e 100644 --- a/src/expr/src/function/window/mod.rs +++ b/src/connector/src/source/filesystem/s3_v2/mod.rs @@ -12,4 +12,4 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub use crate::window_function::*; +pub mod lister; diff --git a/src/connector/src/source/google_pubsub/mod.rs b/src/connector/src/source/google_pubsub/mod.rs index 0c93f672ccdb5..c4c2e5c716a13 100644 --- a/src/connector/src/source/google_pubsub/mod.rs +++ b/src/connector/src/source/google_pubsub/mod.rs @@ -23,6 +23,8 @@ use serde_with::{serde_as, DisplayFromStr}; pub use source::*; pub use split::*; +use crate::source::SourceProperties; + pub const GOOGLE_PUBSUB_CONNECTOR: &str = "google_pubsub"; #[serde_as] @@ -70,6 +72,14 @@ pub struct PubsubProperties { pub start_snapshot: Option, } +impl SourceProperties for PubsubProperties { + type Split = PubsubSplit; + type SplitEnumerator = PubsubSplitEnumerator; + type SplitReader = PubsubSplitReader; + + const SOURCE_NAME: &'static str = GOOGLE_PUBSUB_CONNECTOR; +} + impl PubsubProperties { /// `initialize_env` sets environment variables read by the `google-cloud-pubsub` crate pub(crate) fn initialize_env(&self) { diff --git a/src/connector/src/source/google_pubsub/source/message.rs b/src/connector/src/source/google_pubsub/source/message.rs index 490dc86234348..398ae82febade 100644 --- a/src/connector/src/source/google_pubsub/source/message.rs +++ b/src/connector/src/source/google_pubsub/source/message.rs @@ -50,7 +50,7 @@ impl From for SourceMessage { _ => Some(payload), } }, - offset: 
timestamp.timestamp_nanos().to_string(), + offset: timestamp.timestamp_nanos_opt().unwrap().to_string(), split_id, meta: SourceMeta::GooglePubsub(GooglePubsubMeta { timestamp: Some(timestamp.timestamp_millis()), diff --git a/src/connector/src/source/google_pubsub/source/reader.rs b/src/connector/src/source/google_pubsub/source/reader.rs index d4fa8a9ab5c98..dfe95eeb1b808 100644 --- a/src/connector/src/source/google_pubsub/source/reader.rs +++ b/src/connector/src/source/google_pubsub/source/reader.rs @@ -19,15 +19,14 @@ use futures_async_stream::try_stream; use google_cloud_pubsub::client::Client; use google_cloud_pubsub::subscription::{SeekTo, Subscription}; use risingwave_common::bail; -use tonic::Code; +use tonic_0_9::Code; use super::TaggedReceivedMessage; use crate::parser::ParserConfig; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; -use crate::source::google_pubsub::PubsubProperties; +use crate::source::google_pubsub::{PubsubProperties, PubsubSplit}; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SplitId, SplitImpl, - SplitMetaData, SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, SourceContextRef, + SourceMessage, SplitId, SplitMetaData, SplitReader, }; const PUBSUB_MAX_FETCH_MESSAGES: usize = 1024; @@ -107,10 +106,11 @@ impl CommonSplitReader for PubsubSplitReader { #[async_trait] impl SplitReader for PubsubSplitReader { type Properties = PubsubProperties; + type Split = PubsubSplit; async fn new( properties: PubsubProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, @@ -119,12 +119,7 @@ impl SplitReader for PubsubSplitReader { splits.len() == 1, "the pubsub reader only supports a single split" ); - let split = splits - .into_iter() - .next() - .unwrap() - .into_google_pubsub() - .unwrap(); + let split = splits.into_iter().next().unwrap(); // Set environment variables consumed by 
`google_cloud_pubsub` properties.initialize_env(); diff --git a/src/connector/src/source/google_pubsub/split.rs b/src/connector/src/source/google_pubsub/split.rs index d623a9337cf33..1f598eb6852d4 100644 --- a/src/connector/src/source/google_pubsub/split.rs +++ b/src/connector/src/source/google_pubsub/split.rs @@ -35,13 +35,6 @@ pub struct PubsubSplit { pub(crate) stop_offset: Option, } -impl PubsubSplit { - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - self.start_offset = Some(start_offset); - Ok(()) - } -} - impl SplitMetaData for PubsubSplit { fn restore_from_json(value: JsonbVal) -> anyhow::Result { serde_json::from_value(value.take()).map_err(|e| anyhow!(e)) @@ -54,4 +47,9 @@ impl SplitMetaData for PubsubSplit { fn id(&self) -> SplitId { format!("{}-{}", self.subscription, self.index).into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + self.start_offset = Some(start_offset); + Ok(()) + } } diff --git a/src/connector/src/source/kafka/enumerator/client.rs b/src/connector/src/source/kafka/enumerator/client.rs index 06577aa4e3433..2c0d03306366b 100644 --- a/src/connector/src/source/kafka/enumerator/client.rs +++ b/src/connector/src/source/kafka/enumerator/client.rs @@ -342,6 +342,13 @@ impl KafkaSplitEnumerator { .set(offset); } + pub async fn check_reachability(&self) -> bool { + self.client + .fetch_metadata(None, self.sync_call_timeout) + .await + .is_ok() + } + async fn fetch_topic_partition(&self) -> anyhow::Result> { // for now, we only support one topic let metadata = self diff --git a/src/connector/src/source/kafka/mod.rs b/src/connector/src/source/kafka/mod.rs index 50fabadb41365..c74ae3ac6152f 100644 --- a/src/connector/src/source/kafka/mod.rs +++ b/src/connector/src/source/kafka/mod.rs @@ -27,6 +27,8 @@ pub use source::*; pub use split::*; use crate::common::KafkaCommon; +use crate::source::SourceProperties; + pub const KAFKA_CONNECTOR: &str = "kafka"; pub const 
KAFKA_PROPS_BROKER_KEY: &str = "properties.bootstrap.server"; pub const KAFKA_PROPS_BROKER_KEY_ALIAS: &str = "kafka.brokers"; @@ -123,6 +125,14 @@ pub struct KafkaProperties { pub rdkafka_properties: RdKafkaPropertiesConsumer, } +impl SourceProperties for KafkaProperties { + type Split = KafkaSplit; + type SplitEnumerator = KafkaSplitEnumerator; + type SplitReader = KafkaSplitReader; + + const SOURCE_NAME: &'static str = KAFKA_CONNECTOR; +} + impl KafkaProperties { pub fn set_client(&self, c: &mut rdkafka::ClientConfig) { self.common.set_client(c); diff --git a/src/connector/src/source/kafka/private_link.rs b/src/connector/src/source/kafka/private_link.rs index 5e090d75475ab..573e14c3e073f 100644 --- a/src/connector/src/source/kafka/private_link.rs +++ b/src/connector/src/source/kafka/private_link.rs @@ -31,6 +31,9 @@ use crate::source::kafka::stats::RdKafkaStats; use crate::source::kafka::{KAFKA_PROPS_BROKER_KEY, KAFKA_PROPS_BROKER_KEY_ALIAS}; use crate::source::KAFKA_CONNECTOR; +pub const PRIVATELINK_ENDPOINT_KEY: &str = "privatelink.endpoint"; +pub const CONNECTION_NAME_KEY: &str = "connection.name"; + #[derive(Debug)] enum PrivateLinkContextRole { Consumer, @@ -204,16 +207,17 @@ fn is_kafka_connector(with_properties: &BTreeMap) -> bool { } pub fn insert_privatelink_broker_rewrite_map( - svc: &PrivateLinkService, properties: &mut BTreeMap, + svc: Option<&PrivateLinkService>, + privatelink_endpoint: Option, ) -> anyhow::Result<()> { let mut broker_rewrite_map = HashMap::new(); - - let link_target_value = get_property_required(properties, PRIVATE_LINK_TARGETS_KEY)?; let servers = get_property_required(properties, kafka_props_broker_key(properties))?; let broker_addrs = servers.split(',').collect_vec(); + let link_target_value = get_property_required(properties, PRIVATE_LINK_TARGETS_KEY)?; let link_targets: Vec = serde_json::from_str(link_target_value.as_str()).map_err(|e| anyhow!(e))?; + if broker_addrs.len() != link_targets.len() { return Err(anyhow!( "The number 
of broker addrs {} does not match the number of private link targets {}", @@ -222,19 +226,30 @@ pub fn insert_privatelink_broker_rewrite_map( )); } - for (link, broker) in link_targets.iter().zip_eq_fast(broker_addrs.into_iter()) { - if svc.dns_entries.is_empty() { - return Err(anyhow!( - "No available private link endpoints for Kafka broker {}", - broker - )); + if let Some(endpoint) = privatelink_endpoint { + for (link, broker) in link_targets.iter().zip_eq_fast(broker_addrs.into_iter()) { + // rewrite the broker address to endpoint:port + broker_rewrite_map.insert(broker.to_string(), format!("{}:{}", &endpoint, link.port)); + } + } else { + if svc.is_none() { + return Err(anyhow!("Privatelink endpoint not found.",)); + } + let svc = svc.unwrap(); + for (link, broker) in link_targets.iter().zip_eq_fast(broker_addrs.into_iter()) { + if svc.dns_entries.is_empty() { + return Err(anyhow!( + "No available private link endpoints for Kafka broker {}", + broker + )); + } + // rewrite the broker address to the dns name w/o az + // requires the NLB has enabled the cross-zone load balancing + broker_rewrite_map.insert( + broker.to_string(), + format!("{}:{}", &svc.endpoint_dns_name, link.port), + ); } - // rewrite the broker address to the dns name w/o az - // requires the NLB has enabled the cross-zone load balancing - broker_rewrite_map.insert( - broker.to_string(), - format!("{}:{}", &svc.endpoint_dns_name, link.port), - ); } // save private link dns names into source properties, which diff --git a/src/connector/src/source/kafka/source/reader.rs b/src/connector/src/source/kafka/source/reader.rs index 65887bb825f92..f9f6a9472a1a5 100644 --- a/src/connector/src/source/kafka/source/reader.rs +++ b/src/connector/src/source/kafka/source/reader.rs @@ -28,13 +28,12 @@ use rdkafka::{ClientConfig, Message, Offset, TopicPartitionList}; use crate::parser::ParserConfig; use crate::source::base::SourceMessage; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; use 
crate::source::kafka::{ KafkaProperties, KafkaSplit, PrivateLinkConsumerContext, KAFKA_ISOLATION_LEVEL, }; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SplitId, SplitImpl, SplitMetaData, - SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, SourceContextRef, + SplitId, SplitMetaData, SplitReader, }; pub struct KafkaSplitReader { @@ -49,10 +48,11 @@ pub struct KafkaSplitReader { #[async_trait] impl SplitReader for KafkaSplitReader { type Properties = KafkaProperties; + type Split = KafkaSplit; async fn new( properties: KafkaProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, @@ -107,11 +107,6 @@ impl SplitReader for KafkaSplitReader { .await .map_err(|e| anyhow!("failed to create kafka consumer: {}", e))?; - let splits = splits - .into_iter() - .map(|split| split.into_kafka().unwrap()) - .collect::>(); - let mut tpl = TopicPartitionList::with_capacity(splits.len()); let mut offsets = HashMap::new(); diff --git a/src/connector/src/source/kafka/split.rs b/src/connector/src/source/kafka/split.rs index 9d67b4496fe52..31c834d5f1609 100644 --- a/src/connector/src/source/kafka/split.rs +++ b/src/connector/src/source/kafka/split.rs @@ -39,6 +39,11 @@ impl SplitMetaData for KafkaSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + self.start_offset = Some(start_offset.as_str().parse::().unwrap()); + Ok(()) + } } impl KafkaSplit { @@ -56,11 +61,6 @@ impl KafkaSplit { } } - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - self.start_offset = Some(start_offset.as_str().parse::().unwrap()); - Ok(()) - } - pub fn get_topic_and_partition(&self) -> (String, i32) { (self.topic.clone(), self.partition) } diff --git a/src/connector/src/source/kafka/stats.rs 
b/src/connector/src/source/kafka/stats.rs index 5629dee572e90..7feaf3cc3e91b 100644 --- a/src/connector/src/source/kafka/stats.rs +++ b/src/connector/src/source/kafka/stats.rs @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; - -use itertools::Itertools; use prometheus::core::{AtomicU64, GenericGaugeVec}; use prometheus::{opts, register_int_gauge_vec_with_registry, IntGaugeVec, Registry}; use rdkafka::statistics::{Broker, ConsumerGroup, Partition, Topic, Window}; @@ -363,26 +360,13 @@ impl TopicStats { } } - pub fn report( - &self, - id: &str, - client_id: &str, - mapping: &HashMap<(String, i32), String>, - stats: &Statistics, - ) { + pub fn report(&self, id: &str, client_id: &str, stats: &Statistics) { for (topic, topic_stats) in &stats.topics { - self.report_inner(id, client_id, topic, mapping, topic_stats); + self.report_inner(id, client_id, topic, topic_stats); } } - fn report_inner( - &self, - id: &str, - client_id: &str, - topic: &str, - mapping: &HashMap<(String, i32), String>, - stats: &Topic, - ) { + fn report_inner(&self, id: &str, client_id: &str, topic: &str, stats: &Topic) { self.metadata_age .with_label_values(&[id, client_id, topic]) .set(stats.metadata_age); @@ -390,7 +374,7 @@ impl TopicStats { .report(id, client_id, "", topic, &stats.batchsize); self.batch_cnt .report(id, client_id, "", topic, &stats.batchcnt); - self.partitions.report(id, client_id, topic, mapping, stats) + self.partitions.report(id, client_id, topic, stats) } } @@ -431,182 +415,182 @@ impl PartitionStats { let msgq_cnt = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_msgq_cnt", "Number of messages in the producer queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let msgq_bytes = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_msgq_bytes", "Size of messages in 
the producer queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let xmit_msgq_cnt = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_xmit_msgq_cnt", "Number of messages in the transmit queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let xmit_msgq_bytes = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_xmit_msgq_bytes", "Size of messages in the transmit queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let fetchq_cnt = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_fetchq_cnt", "Number of messages in the fetch queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let fetchq_size = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_fetchq_size", "Size of messages in the fetch queue", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let query_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_query_offset", "Current query offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let next_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_next_offset", "Next offset to query", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let app_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_app_offset", "Last acknowledged offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let stored_offset = register_int_gauge_vec_with_registry!( 
"rdkafka_topic_partition_stored_offset", "Last stored offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let committed_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_committed_offset", "Last committed offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let eof_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_eof_offset", "Last offset in broker log", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let lo_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_lo_offset", "Low offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let hi_offset = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_hi_offset", "High offset", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let consumer_lag = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_consumer_lag", "Consumer lag", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let consumer_lag_store = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_consumer_lag_store", "Consumer lag stored", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let txmsgs = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_txmsgs", "Number of transmitted messages", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let txbytes = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_txbytes", "Number of 
transmitted bytes", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let rxmsgs = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_rxmsgs", "Number of received messages", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let rxbytes = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_rxbytes", "Number of received bytes", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let msgs = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_msgs", "Number of messages in partition", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let rx_ver_drops = register_uint_gauge_vec_with_registry!( "rdkafka_topic_partition_rx_ver_drops", "Number of received messages dropped due to version mismatch", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let msgs_inflight = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_msgs_inflight", "Number of messages in-flight", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let next_ack_seq = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_next_ack_seq", "Next ack sequence number", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let next_err_seq = register_int_gauge_vec_with_registry!( "rdkafka_topic_partition_next_err_seq", "Next error sequence number", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); let acked_msgid = register_uint_gauge_vec_with_registry!( 
"rdkafka_topic_partition_acked_msgid", "Acknowledged message ID", - &["id", "client_id", "broker", "topic", "partition"], + &["id", "client_id", "topic", "partition"], registry ) .unwrap(); @@ -642,47 +626,14 @@ impl PartitionStats { } } - pub fn report( - &self, - id: &str, - client_id: &str, - topic: &str, - broker_mapping: &HashMap<(String, i32), String>, - stats: &Topic, - ) { + pub fn report(&self, id: &str, client_id: &str, topic: &str, stats: &Topic) { for partition_stats in stats.partitions.values() { - self.report_inner(id, client_id, topic, broker_mapping, partition_stats); + self.report_inner(id, client_id, topic, partition_stats); } } - fn report_inner( - &self, - id: &str, - client_id: &str, - topic: &str, - broker_mapping: &HashMap<(String, i32), String>, - stats: &Partition, - ) { - let broker_name = match broker_mapping.get(&(topic.to_string(), stats.partition)) { - Some(broker_name) => broker_name.as_str(), - None => { - tracing::warn!( - "Cannot find broker name for topic {} partition {}, id {}, client_id {}", - topic, - stats.partition, - id, - client_id - ); - return; - } - }; - let labels = [ - id, - client_id, - broker_name, - topic, - &stats.partition.to_string(), - ]; + fn report_inner(&self, id: &str, client_id: &str, topic: &str, stats: &Partition) { + let labels = [id, client_id, topic, &stats.partition.to_string()]; self.msgq_cnt.with_label_values(&labels).set(stats.msgq_cnt); self.msgq_bytes @@ -914,8 +865,6 @@ impl RdKafkaStats { } pub fn report(&self, id: &str, stats: &Statistics) { - let topic_partition_to_broker_mapping = get_topic_partition_to_broker_mapping(stats); - let client_id = stats.name.as_str(); self.ts.with_label_values(&[id, client_id]).set(stats.ts); self.time @@ -965,34 +914,13 @@ impl RdKafkaStats { .set(stats.metadata_cache_cnt); self.broker_stats.report(id, client_id, stats); - self.topic_stats - .report(id, client_id, &topic_partition_to_broker_mapping, stats); + self.topic_stats.report(id, client_id, stats); if 
let Some(cgrp) = &stats.cgrp { self.cgrp.report(id, client_id, cgrp) } } } -#[inline] -fn get_topic_partition_to_broker_mapping(stats: &Statistics) -> HashMap<(String, i32), String> { - let topic_partition_to_broker_mapping = stats - .brokers - .values() - .flat_map(|broker| { - let broker_name = &broker.name; - broker - .toppars - .iter() - .map(|(topic, partition)| { - ((topic.clone(), partition.partition), broker_name.clone()) - }) - .collect_vec() - }) - .collect::>(); - - topic_partition_to_broker_mapping -} - impl BrokerStats { pub fn new(registry: Registry) -> Self { let state_age = register_int_gauge_vec_with_registry!( diff --git a/src/connector/src/source/kinesis/mod.rs b/src/connector/src/source/kinesis/mod.rs index fc786f8f1b10d..993e28379d6ff 100644 --- a/src/connector/src/source/kinesis/mod.rs +++ b/src/connector/src/source/kinesis/mod.rs @@ -19,20 +19,30 @@ pub mod split; use serde::Deserialize; use crate::common::KinesisCommon; +use crate::source::kinesis::enumerator::client::KinesisSplitEnumerator; +use crate::source::kinesis::source::reader::KinesisSplitReader; +use crate::source::kinesis::split::KinesisSplit; +use crate::source::SourceProperties; pub const KINESIS_CONNECTOR: &str = "kinesis"; #[derive(Clone, Debug, Deserialize)] pub struct KinesisProperties { #[serde(rename = "scan.startup.mode", alias = "kinesis.scan.startup.mode")] - // accepted values: "latest", "earliest", "sequence_number" + // accepted values: "latest", "earliest", "timestamp" pub scan_startup_mode: Option, - #[serde( - rename = "scan.startup.sequence_number", - alias = "kinesis.scan.startup.sequence_number" - )] - pub seq_offset: Option, + + #[serde(rename = "scan.startup.timestamp.millis")] + pub timestamp_offset: Option, #[serde(flatten)] pub common: KinesisCommon, } + +impl SourceProperties for KinesisProperties { + type Split = KinesisSplit; + type SplitEnumerator = KinesisSplitEnumerator; + type SplitReader = KinesisSplitReader; + + const SOURCE_NAME: &'static str = 
KINESIS_CONNECTOR; +} diff --git a/src/connector/src/source/kinesis/source/reader.rs b/src/connector/src/source/kinesis/source/reader.rs index f4aad6ad80587..4ed00b88a58b2 100644 --- a/src/connector/src/source/kinesis/source/reader.rs +++ b/src/connector/src/source/kinesis/source/reader.rs @@ -18,19 +18,19 @@ use anyhow::{anyhow, Result}; use async_trait::async_trait; use aws_sdk_kinesis::error::{DisplayErrorContext, SdkError}; use aws_sdk_kinesis::operation::get_records::{GetRecordsError, GetRecordsOutput}; +use aws_sdk_kinesis::primitives::DateTime; use aws_sdk_kinesis::types::ShardIteratorType; use aws_sdk_kinesis::Client as KinesisClient; use futures_async_stream::try_stream; use tokio_retry; use crate::parser::ParserConfig; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; use crate::source::kinesis::source::message::KinesisMessage; -use crate::source::kinesis::split::KinesisOffset; +use crate::source::kinesis::split::{KinesisOffset, KinesisSplit}; use crate::source::kinesis::KinesisProperties; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SplitId, SplitImpl, - SplitMetaData, SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, SourceContextRef, + SourceMessage, SplitId, SplitMetaData, SplitReader, }; #[derive(Debug, Clone)] @@ -51,17 +51,18 @@ pub struct KinesisSplitReader { #[async_trait] impl SplitReader for KinesisSplitReader { type Properties = KinesisProperties; + type Split = KinesisSplit; async fn new( properties: KinesisProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, ) -> Result { assert!(splits.len() == 1); - let split = splits.into_iter().next().unwrap().into_kinesis().unwrap(); + let split = splits.into_iter().next().unwrap(); let start_position = match &split.start_position { KinesisOffset::None => match &properties.scan_startup_mode { @@ -69,16 +70,16 @@ impl SplitReader for 
KinesisSplitReader { Some(mode) => match mode.as_str() { "earliest" => KinesisOffset::Earliest, "latest" => KinesisOffset::Latest, - "sequence_number" => { - if let Some(seq) = &properties.seq_offset { - KinesisOffset::SequenceNumber(seq.clone()) + "timestamp" => { + if let Some(ts) = &properties.timestamp_offset { + KinesisOffset::Timestamp(*ts) } else { - return Err(anyhow!("scan_startup_sequence_number is required")); + return Err(anyhow!("scan.startup.timestamp.millis is required")); } } _ => { return Err(anyhow!( - "invalid scan_startup_mode, accept earliest/latest/sequence_number" + "invalid scan_startup_mode, accept earliest/latest/timestamp" )) } }, @@ -86,11 +87,11 @@ impl SplitReader for KinesisSplitReader { start_position => start_position.to_owned(), }; - if !matches!(start_position, KinesisOffset::SequenceNumber(_)) - && properties.seq_offset.is_some() + if !matches!(start_position, KinesisOffset::Timestamp(_)) + && properties.timestamp_offset.is_some() { return Err( - anyhow!("scan.startup.mode need to be set to 'sequence_number' if you want to start with a specific sequence number") + anyhow!("scan.startup.mode need to be set to 'timestamp' if you want to start with a specific timestamp") ); } @@ -208,27 +209,37 @@ impl CommonSplitReader for KinesisSplitReader { } impl KinesisSplitReader { async fn new_shard_iter(&mut self) -> Result<()> { - let (starting_seq_num, iter_type) = if self.latest_offset.is_some() { + let (starting_seq_num, start_timestamp, iter_type) = if self.latest_offset.is_some() { ( self.latest_offset.clone(), + None, ShardIteratorType::AfterSequenceNumber, ) } else { match &self.start_position { - KinesisOffset::Earliest => (None, ShardIteratorType::TrimHorizon), - KinesisOffset::SequenceNumber(seq) => { - (Some(seq.clone()), ShardIteratorType::AfterSequenceNumber) - } - KinesisOffset::Latest => (None, ShardIteratorType::Latest), + KinesisOffset::Earliest => (None, None, ShardIteratorType::TrimHorizon), + 
KinesisOffset::SequenceNumber(seq) => ( + Some(seq.clone()), + None, + ShardIteratorType::AfterSequenceNumber, + ), + KinesisOffset::Latest => (None, None, ShardIteratorType::Latest), + KinesisOffset::Timestamp(ts) => ( + None, + Some(DateTime::from_millis(*ts)), + ShardIteratorType::AtTimestamp, + ), _ => unreachable!(), } }; + // `starting_seq_num` and `starting_timestamp` will not be both set async fn get_shard_iter_inner( client: &KinesisClient, stream_name: &str, shard_id: &str, starting_seq_num: Option, + starting_timestamp: Option, iter_type: ShardIteratorType, ) -> Result { let resp = client @@ -237,8 +248,10 @@ impl KinesisSplitReader { .shard_id(shard_id) .shard_iterator_type(iter_type) .set_starting_sequence_number(starting_seq_num) + .set_timestamp(starting_timestamp) .send() - .await?; + .await + .map_err(|e| anyhow!(DisplayErrorContext(e)))?; if let Some(iter) = resp.shard_iterator() { Ok(iter.to_owned()) @@ -256,6 +269,7 @@ impl KinesisSplitReader { &self.stream_name, &self.shard_id, starting_seq_num.clone(), + start_timestamp, iter_type.clone(), ) }, @@ -307,18 +321,15 @@ mod tests { }, scan_startup_mode: None, - seq_offset: Some( - // redundant seq number - "49629139817504901062972448413535783695568426186596941842".to_string(), - ), + timestamp_offset: Some(123456789098765432), }; let client = KinesisSplitReader::new( properties, - vec![SplitImpl::Kinesis(KinesisSplit { + vec![KinesisSplit { shard_id: "shardId-000000000001".to_string().into(), start_position: KinesisOffset::Earliest, end_position: KinesisOffset::None, - })], + }], Default::default(), Default::default(), None, @@ -343,16 +354,16 @@ mod tests { }, scan_startup_mode: None, - seq_offset: None, + timestamp_offset: None, }; let trim_horizen_reader = KinesisSplitReader::new( properties.clone(), - vec![SplitImpl::Kinesis(KinesisSplit { + vec![KinesisSplit { shard_id: "shardId-000000000001".to_string().into(), start_position: KinesisOffset::Earliest, end_position: KinesisOffset::None, - 
})], + }], Default::default(), Default::default(), None, @@ -364,13 +375,13 @@ mod tests { let offset_reader = KinesisSplitReader::new( properties.clone(), - vec![SplitImpl::Kinesis(KinesisSplit { + vec![KinesisSplit { shard_id: "shardId-000000000001".to_string().into(), start_position: KinesisOffset::SequenceNumber( "49629139817504901062972448413535783695568426186596941842".to_string(), ), end_position: KinesisOffset::None, - })], + }], Default::default(), Default::default(), None, diff --git a/src/connector/src/source/kinesis/split.rs b/src/connector/src/source/kinesis/split.rs index 09138d842b8ea..e03ecb59bfcd0 100644 --- a/src/connector/src/source/kinesis/split.rs +++ b/src/connector/src/source/kinesis/split.rs @@ -46,6 +46,17 @@ impl SplitMetaData for KinesisSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + let start_offset = if start_offset.is_empty() { + KinesisOffset::Earliest + } else { + KinesisOffset::SequenceNumber(start_offset) + }; + + self.start_position = start_offset; + Ok(()) + } } impl KinesisSplit { @@ -60,15 +71,4 @@ impl KinesisSplit { end_position, } } - - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - let start_offset = if start_offset.is_empty() { - KinesisOffset::Earliest - } else { - KinesisOffset::SequenceNumber(start_offset) - }; - - self.start_position = start_offset; - Ok(()) - } } diff --git a/src/connector/src/source/mock_external_table.rs b/src/connector/src/source/mock_external_table.rs index c4e16aae6ae85..7224a32b9e571 100644 --- a/src/connector/src/source/mock_external_table.rs +++ b/src/connector/src/source/mock_external_table.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::future::Future; use std::sync::atomic::AtomicUsize; use futures::stream::BoxStream; @@ -91,24 +90,21 @@ impl MockExternalTableReader { } impl ExternalTableReader for MockExternalTableReader { - type CdcOffsetFuture<'a> = impl Future> + 'a; - fn get_normalized_table_name(&self, _table_name: &SchemaTableName) -> String { "`mock_table`".to_string() } - fn current_cdc_offset(&self) -> Self::CdcOffsetFuture<'_> { + async fn current_cdc_offset(&self) -> ConnectorResult { static IDX: AtomicUsize = AtomicUsize::new(0); - async move { - let idx = IDX.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - if idx < self.binlog_watermarks.len() { - Ok(CdcOffset::MySql(self.binlog_watermarks[idx].clone())) - } else { - Ok(CdcOffset::MySql(MySqlOffset { - filename: "1.binlog".to_string(), - position: u64::MAX, - })) - } + + let idx = IDX.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + if idx < self.binlog_watermarks.len() { + Ok(CdcOffset::MySql(self.binlog_watermarks[idx].clone())) + } else { + Ok(CdcOffset::MySql(MySqlOffset { + filename: "1.binlog".to_string(), + position: u64::MAX, + })) } } diff --git a/src/connector/src/source/mod.rs b/src/connector/src/source/mod.rs index ed3979ba67ca2..869b7089ac271 100644 --- a/src/connector/src/source/mod.rs +++ b/src/connector/src/source/mod.rs @@ -16,7 +16,6 @@ pub mod base; pub mod cdc; pub mod data_gen_util; pub mod datagen; -pub mod dummy_connector; pub mod filesystem; pub mod google_pubsub; pub mod kafka; @@ -26,6 +25,7 @@ pub mod nats; pub mod nexmark; pub mod pulsar; pub use base::*; +pub(crate) use common::*; pub use google_pubsub::GOOGLE_PUBSUB_CONNECTOR; pub use kafka::KAFKA_CONNECTOR; pub use kinesis::KINESIS_CONNECTOR; @@ -34,8 +34,11 @@ mod common; pub mod external; mod manager; mod mock_external_table; +pub mod test_source; + pub use manager::{SourceColumnDesc, SourceColumnType}; pub use mock_external_table::MockExternalTableReader; +pub use crate::source::filesystem::{S3_CONNECTOR, S3_V2_CONNECTOR}; pub 
use crate::source::nexmark::NEXMARK_CONNECTOR; pub use crate::source::pulsar::PULSAR_CONNECTOR; diff --git a/src/connector/src/source/monitor/metrics.rs b/src/connector/src/source/monitor/metrics.rs index c6ea9998e55e4..fa3e836993c4f 100644 --- a/src/connector/src/source/monitor/metrics.rs +++ b/src/connector/src/source/monitor/metrics.rs @@ -62,6 +62,8 @@ pub struct SourceMetrics { /// Report latest message id pub latest_message_id: GenericGaugeVec, pub rdkafka_native_metric: Arc, + + pub connector_source_rows_received: GenericCounterVec, } pub static GLOBAL_SOURCE_METRICS: LazyLock = @@ -103,6 +105,15 @@ impl SourceMetrics { registry, ) .unwrap(); + + let connector_source_rows_received = register_int_counter_vec_with_registry!( + "connector_source_rows_received", + "Number of rows received by source", + &["source_type", "source_id"], + registry + ) + .unwrap(); + let rdkafka_native_metric = Arc::new(RdKafkaStats::new(registry.clone())); SourceMetrics { partition_input_count, @@ -110,6 +121,7 @@ impl SourceMetrics { user_source_error_count, latest_message_id, rdkafka_native_metric, + connector_source_rows_received, } } } diff --git a/src/connector/src/source/nats/enumerator/mod.rs b/src/connector/src/source/nats/enumerator/mod.rs index 88384bfb685e6..e987a45188114 100644 --- a/src/connector/src/source/nats/enumerator/mod.rs +++ b/src/connector/src/source/nats/enumerator/mod.rs @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::sync::Arc; + use anyhow; use async_trait::async_trait; -use super::source::NatsSplit; +use super::source::{NatsOffset, NatsSplit}; use super::NatsProperties; -use crate::source::{SourceEnumeratorContextRef, SplitEnumerator}; +use crate::source::{SourceEnumeratorContextRef, SplitEnumerator, SplitId}; #[derive(Debug, Clone, Eq, PartialEq)] pub struct NatsSplitEnumerator { subject: String, - split_num: i32, + split_id: SplitId, } #[async_trait] @@ -36,7 +38,7 @@ impl SplitEnumerator for NatsSplitEnumerator { ) -> anyhow::Result { Ok(Self { subject: properties.common.subject, - split_num: 0, + split_id: Arc::from("0"), }) } @@ -44,8 +46,8 @@ impl SplitEnumerator for NatsSplitEnumerator { // TODO: to simplify the logic, return 1 split for first version let nats_split = NatsSplit { subject: self.subject.clone(), - split_num: 0, // be the same as `from_nats_jetstream_message` - start_sequence: None, + split_id: Arc::from("0"), // be the same as `from_nats_jetstream_message` + start_sequence: NatsOffset::None, }; Ok(vec![nats_split]) diff --git a/src/connector/src/source/nats/mod.rs b/src/connector/src/source/nats/mod.rs index 2aa9dc2de55f2..3e8cc57bc1da8 100644 --- a/src/connector/src/source/nats/mod.rs +++ b/src/connector/src/source/nats/mod.rs @@ -19,12 +19,30 @@ pub mod split; use serde::Deserialize; use crate::common::NatsCommon; +use crate::source::nats::enumerator::NatsSplitEnumerator; +use crate::source::nats::source::{NatsSplit, NatsSplitReader}; +use crate::source::SourceProperties; + pub const NATS_CONNECTOR: &str = "nats"; #[derive(Clone, Debug, Deserialize)] pub struct NatsProperties { #[serde(flatten)] pub common: NatsCommon, + + #[serde(rename = "scan.startup.mode")] + pub scan_startup_mode: Option, + + #[serde(rename = "scan.startup.timestamp_millis")] + pub start_time: Option, } impl NatsProperties {} + +impl SourceProperties for NatsProperties { + type Split = NatsSplit; + type SplitEnumerator = NatsSplitEnumerator; + type SplitReader = 
NatsSplitReader; + + const SOURCE_NAME: &'static str = NATS_CONNECTOR; +} diff --git a/src/connector/src/source/nats/source/message.rs b/src/connector/src/source/nats/source/message.rs index afb3029d3b917..e582df86664e8 100644 --- a/src/connector/src/source/nats/source/message.rs +++ b/src/connector/src/source/nats/source/message.rs @@ -13,19 +13,37 @@ // limitations under the License. use async_nats; +use async_nats::jetstream::Message; use crate::source::base::SourceMessage; -use crate::source::SourceMeta; +use crate::source::{SourceMeta, SplitId}; -impl SourceMessage { - pub fn from_nats_jetstream_message(message: async_nats::jetstream::message::Message) -> Self { +#[derive(Clone, Debug)] +pub struct NatsMessage { + pub split_id: SplitId, + pub sequence_number: String, + pub payload: Vec, +} + +impl From for SourceMessage { + fn from(message: NatsMessage) -> Self { SourceMessage { key: None, - payload: Some(message.message.payload.to_vec()), + payload: Some(message.payload), // For nats jetstream, use sequence id as offset - offset: message.info().unwrap().stream_sequence.to_string(), - split_id: "0".into(), + offset: message.sequence_number, + split_id: message.split_id, meta: SourceMeta::Empty, } } } + +impl NatsMessage { + pub fn new(split_id: SplitId, message: Message) -> Self { + NatsMessage { + split_id, + sequence_number: message.info().unwrap().stream_sequence.to_string(), + payload: message.message.payload.to_vec(), + } + } +} diff --git a/src/connector/src/source/nats/source/reader.rs b/src/connector/src/source/nats/source/reader.rs index c0070a16c1392..6e22748bcf468 100644 --- a/src/connector/src/source/nats/source/reader.rs +++ b/src/connector/src/source/nats/source/reader.rs @@ -12,18 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use anyhow::Result; +use anyhow::{anyhow, Result}; use async_nats::jetstream::consumer; use async_trait::async_trait; use futures::StreamExt; use futures_async_stream::try_stream; +use super::message::NatsMessage; +use super::{NatsOffset, NatsSplit}; use crate::parser::ParserConfig; use crate::source::common::{into_chunk_stream, CommonSplitReader}; -use crate::source::nats::split::NatsSplit; use crate::source::nats::NatsProperties; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SplitImpl, SplitReader, + BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SplitId, SplitReader, }; pub struct NatsSplitReader { @@ -31,34 +32,60 @@ pub struct NatsSplitReader { properties: NatsProperties, parser_config: ParserConfig, source_ctx: SourceContextRef, + start_position: NatsOffset, + split_id: SplitId, } #[async_trait] impl SplitReader for NatsSplitReader { type Properties = NatsProperties; + type Split = NatsSplit; async fn new( properties: NatsProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, ) -> Result { // TODO: to simplify the logic, return 1 split for first version assert!(splits.len() == 1); - let splits = splits - .into_iter() - .map(|split| split.into_nats().unwrap()) - .collect::>(); + let split = splits.into_iter().next().unwrap(); + let split_id = split.split_id; + let start_position = match &split.start_sequence { + NatsOffset::None => match &properties.scan_startup_mode { + None => NatsOffset::Earliest, + Some(mode) => match mode.as_str() { + "latest" => NatsOffset::Latest, + "earliest" => NatsOffset::Earliest, + "timestamp_millis" => { + if let Some(time) = &properties.start_time { + NatsOffset::Timestamp(time.parse()?) 
+ } else { + return Err(anyhow!("scan_startup_timestamp_millis is required")); + } + } + _ => { + return Err(anyhow!( + "invalid scan_startup_mode, accept earliest/latest/timestamp_millis" + )) + } + }, + }, + start_position => start_position.to_owned(), + }; + let consumer = properties .common - .build_consumer(0, splits[0].start_sequence) + .build_consumer(split_id.to_string(), start_position.clone()) .await?; Ok(Self { consumer, properties, parser_config, source_ctx, + start_position, + split_id, }) } @@ -78,7 +105,10 @@ impl CommonSplitReader for NatsSplitReader { for msgs in messages.ready_chunks(capacity) { let mut msg_vec = Vec::with_capacity(capacity); for msg in msgs { - msg_vec.push(SourceMessage::from_nats_jetstream_message(msg?)); + msg_vec.push(SourceMessage::from(NatsMessage::new( + self.split_id.clone(), + msg?, + ))); } yield msg_vec; } diff --git a/src/connector/src/source/nats/split.rs b/src/connector/src/source/nats/split.rs index f0fcfaff35481..d9b3e11b98f87 100644 --- a/src/connector/src/source/nats/split.rs +++ b/src/connector/src/source/nats/split.rs @@ -18,20 +18,29 @@ use serde::{Deserialize, Serialize}; use crate::source::{SplitId, SplitMetaData}; +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] +pub enum NatsOffset { + Earliest, + Latest, + SequenceNumber(String), + Timestamp(i128), + None, +} + /// The states of a NATS split, which will be persisted to checkpoint. #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Hash)] pub struct NatsSplit { pub(crate) subject: String, // TODO: to simplify the logic, return 1 split for first version. May use parallelism in // future. 
- pub(crate) split_num: i32, - pub(crate) start_sequence: Option, + pub(crate) split_id: SplitId, + pub(crate) start_sequence: NatsOffset, } impl SplitMetaData for NatsSplit { fn id(&self) -> SplitId { // TODO: should avoid constructing a string every time - format!("{}", self.split_num).into() + format!("{}", self.split_id).into() } fn restore_from_json(value: JsonbVal) -> anyhow::Result { @@ -41,19 +50,24 @@ impl SplitMetaData for NatsSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_sequence: String) -> anyhow::Result<()> { + let start_sequence = if start_sequence.is_empty() { + NatsOffset::Earliest + } else { + NatsOffset::SequenceNumber(start_sequence) + }; + self.start_sequence = start_sequence; + Ok(()) + } } impl NatsSplit { - pub fn new(subject: String, split_num: i32, start_sequence: Option) -> Self { + pub fn new(subject: String, split_id: SplitId, start_sequence: NatsOffset) -> Self { Self { subject, - split_num, + split_id, start_sequence, } } - - pub fn update_with_offset(&mut self, start_sequence: String) -> anyhow::Result<()> { - self.start_sequence = Some(start_sequence.as_str().parse::().unwrap()); - Ok(()) - } } diff --git a/src/connector/src/source/nexmark/mod.rs b/src/connector/src/source/nexmark/mod.rs index 679306cf96b22..e1f75ae1008e7 100644 --- a/src/connector/src/source/nexmark/mod.rs +++ b/src/connector/src/source/nexmark/mod.rs @@ -25,6 +25,9 @@ use serde::Deserialize; use serde_with::{serde_as, DisplayFromStr}; pub use split::*; +use crate::source::nexmark::source::reader::NexmarkSplitReader; +use crate::source::SourceProperties; + pub const NEXMARK_CONNECTOR: &str = "nexmark"; const fn identity_i32() -> i32 { @@ -217,6 +220,14 @@ pub struct NexmarkPropertiesInner { pub threads: Option, } +impl SourceProperties for NexmarkProperties { + type Split = NexmarkSplit; + type SplitEnumerator = NexmarkSplitEnumerator; + type SplitReader = 
NexmarkSplitReader; + + const SOURCE_NAME: &'static str = NEXMARK_CONNECTOR; +} + fn default_event_num() -> u64 { u64::MAX } diff --git a/src/connector/src/source/nexmark/source/reader.rs b/src/connector/src/source/nexmark/source/reader.rs index 190a3f1cbb63d..a2ca20f1a1f0b 100644 --- a/src/connector/src/source/nexmark/source/reader.rs +++ b/src/connector/src/source/nexmark/source/reader.rs @@ -33,8 +33,8 @@ use crate::source::nexmark::source::combined_event::{ }; use crate::source::nexmark::{NexmarkProperties, NexmarkSplit}; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SplitId, SplitImpl, SplitMetaData, - SplitReader, StreamChunkWithState, + BoxSourceWithStateStream, Column, SourceContextRef, SplitId, SplitMetaData, SplitReader, + StreamChunkWithState, }; #[derive(Debug)] @@ -55,11 +55,12 @@ pub struct NexmarkSplitReader { #[async_trait] impl SplitReader for NexmarkSplitReader { type Properties = NexmarkProperties; + type Split = NexmarkSplit; #[allow(clippy::unused_async)] async fn new( properties: NexmarkProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, @@ -67,7 +68,7 @@ impl SplitReader for NexmarkSplitReader { tracing::debug!("Splits for nexmark found! 
{:?}", splits); assert!(splits.len() == 1); // TODO: currently, assume there's only one split in one reader - let split = splits.into_iter().next().unwrap().into_nexmark().unwrap(); + let split = splits.into_iter().next().unwrap(); let split_id = split.id(); let split_index = split.split_index as u64; @@ -182,7 +183,7 @@ mod tests { use super::*; use crate::source::nexmark::{NexmarkPropertiesInner, NexmarkSplitEnumerator}; - use crate::source::{SourceEnumeratorContext, SplitEnumerator, SplitImpl}; + use crate::source::{SourceEnumeratorContext, SplitEnumerator}; #[tokio::test] async fn test_nexmark_split_reader() -> Result<()> { @@ -197,12 +198,7 @@ mod tests { let mut enumerator = NexmarkSplitEnumerator::new(props.clone(), SourceEnumeratorContext::default().into()) .await?; - let list_splits_resp: Vec = enumerator - .list_splits() - .await? - .into_iter() - .map(SplitImpl::Nexmark) - .collect(); + let list_splits_resp: Vec<_> = enumerator.list_splits().await?.into_iter().collect(); assert_eq!(list_splits_resp.len(), 2); diff --git a/src/connector/src/source/nexmark/split.rs b/src/connector/src/source/nexmark/split.rs index e1730993f5fbc..221fa20cbfe48 100644 --- a/src/connector/src/source/nexmark/split.rs +++ b/src/connector/src/source/nexmark/split.rs @@ -38,6 +38,11 @@ impl SplitMetaData for NexmarkSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + self.start_offset = Some(start_offset.as_str().parse::().unwrap()); + Ok(()) + } } impl NexmarkSplit { @@ -48,9 +53,4 @@ impl NexmarkSplit { start_offset, } } - - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - self.start_offset = Some(start_offset.as_str().parse::().unwrap()); - Ok(()) - } } diff --git a/src/connector/src/source/pulsar/enumerator/client.rs b/src/connector/src/source/pulsar/enumerator/client.rs index e7ec559cf91b8..32362fb156f42 
100644 --- a/src/connector/src/source/pulsar/enumerator/client.rs +++ b/src/connector/src/source/pulsar/enumerator/client.rs @@ -46,8 +46,8 @@ impl SplitEnumerator for PulsarSplitEnumerator { properties: PulsarProperties, _context: SourceEnumeratorContextRef, ) -> Result { - let pulsar = properties.build_pulsar_client().await?; - let topic = properties.topic; + let pulsar = properties.common.build_client().await?; + let topic = properties.common.topic; let parsed_topic = parse_topic(&topic)?; let mut scan_start_offset = match properties diff --git a/src/connector/src/source/pulsar/mod.rs b/src/connector/src/source/pulsar/mod.rs index 5d2bbfa332307..544d1b7fb3ed3 100644 --- a/src/connector/src/source/pulsar/mod.rs +++ b/src/connector/src/source/pulsar/mod.rs @@ -17,127 +17,32 @@ pub mod source; pub mod split; pub mod topic; -use std::collections::HashMap; -use std::io::Write; - -use anyhow::{anyhow, Result}; pub use enumerator::*; -use pulsar::authentication::oauth2::{OAuth2Authentication, OAuth2Params}; -use pulsar::{Authentication, Pulsar, TokioExecutor}; -use risingwave_common::error::ErrorCode::InvalidParameterValue; -use risingwave_common::error::RwError; use serde::Deserialize; pub use split::*; -use tempfile::NamedTempFile; -use url::Url; -use crate::aws_auth::AwsAuthProps; -use crate::aws_utils::load_file_descriptor_from_s3; +use crate::common::PulsarCommon; +use crate::source::pulsar::source::reader::PulsarSplitReader; +use crate::source::SourceProperties; pub const PULSAR_CONNECTOR: &str = "pulsar"; -#[derive(Clone, Debug, Deserialize)] -pub struct PulsarOauth { - #[serde(rename = "oauth.issuer.url")] - pub issuer_url: String, - - #[serde(rename = "oauth.credentials.url")] - pub credentials_url: String, - - #[serde(rename = "oauth.audience")] - pub audience: String, +impl SourceProperties for PulsarProperties { + type Split = PulsarSplit; + type SplitEnumerator = PulsarSplitEnumerator; + type SplitReader = PulsarSplitReader; - #[serde(rename = 
"oauth.scope")] - pub scope: Option, - - #[serde(flatten)] - /// required keys refer to [`crate::aws_utils::AWS_DEFAULT_CONFIG`] - pub s3_credentials: HashMap, + const SOURCE_NAME: &'static str = PULSAR_CONNECTOR; } #[derive(Clone, Debug, Deserialize)] pub struct PulsarProperties { - #[serde(rename = "topic", alias = "pulsar.topic")] - pub topic: String, - - #[serde(rename = "service.url", alias = "pulsar.service.url")] - pub service_url: String, - #[serde(rename = "scan.startup.mode", alias = "pulsar.scan.startup.mode")] pub scan_startup_mode: Option, #[serde(rename = "scan.startup.timestamp_millis", alias = "pulsar.time.offset")] pub time_offset: Option, - #[serde(rename = "auth.token")] - pub auth_token: Option, - #[serde(flatten)] - pub oauth: Option, -} - -impl PulsarProperties { - pub async fn build_pulsar_client(&self) -> Result> { - let mut pulsar_builder = Pulsar::builder(&self.service_url, TokioExecutor); - let mut temp_file = None; - if let Some(oauth) = &self.oauth { - let url = Url::parse(&oauth.credentials_url)?; - match url.scheme() { - "s3" => { - let credentials = load_file_descriptor_from_s3( - &url, - &AwsAuthProps::from_pairs( - oauth - .s3_credentials - .iter() - .map(|(k, v)| (k.as_str(), v.as_str())), - ), - ) - .await?; - let mut f = NamedTempFile::new()?; - f.write_all(&credentials)?; - f.as_file().sync_all()?; - temp_file = Some(f); - } - "file" => {} - _ => { - return Err(RwError::from(InvalidParameterValue(String::from( - "invalid credentials_url, only file url and s3 url are supported", - ))) - .into()); - } - } - - let auth_params = OAuth2Params { - issuer_url: oauth.issuer_url.clone(), - credentials_url: if temp_file.is_none() { - oauth.credentials_url.clone() - } else { - let mut raw_path = temp_file - .as_ref() - .unwrap() - .path() - .to_str() - .unwrap() - .to_string(); - raw_path.insert_str(0, "file://"); - raw_path - }, - audience: Some(oauth.audience.clone()), - scope: oauth.scope.clone(), - }; - - pulsar_builder = 
pulsar_builder - .with_auth_provider(OAuth2Authentication::client_credentials(auth_params)); - } else if let Some(auth_token) = &self.auth_token { - pulsar_builder = pulsar_builder.with_auth(Authentication { - name: "token".to_string(), - data: Vec::from(auth_token.as_str()), - }); - } - - let res = pulsar_builder.build().await.map_err(|e| anyhow!(e))?; - drop(temp_file); - Ok(res) - } + pub common: PulsarCommon, } diff --git a/src/connector/src/source/pulsar/source/reader.rs b/src/connector/src/source/pulsar/source/reader.rs index 9c749c27d616a..85d85a8d18714 100644 --- a/src/connector/src/source/pulsar/source/reader.rs +++ b/src/connector/src/source/pulsar/source/reader.rs @@ -22,15 +22,13 @@ use itertools::Itertools; use pulsar::consumer::InitialPosition; use pulsar::message::proto::MessageIdData; use pulsar::{Consumer, ConsumerBuilder, ConsumerOptions, Pulsar, SubType, TokioExecutor}; -use risingwave_common::try_match_expand; use crate::parser::ParserConfig; -use crate::source::common::{into_chunk_stream, CommonSplitReader}; use crate::source::pulsar::split::PulsarSplit; use crate::source::pulsar::{PulsarEnumeratorOffset, PulsarProperties}; use crate::source::{ - BoxSourceWithStateStream, Column, SourceContextRef, SourceMessage, SplitId, SplitImpl, - SplitMetaData, SplitReader, + into_chunk_stream, BoxSourceWithStateStream, Column, CommonSplitReader, SourceContextRef, + SourceMessage, SplitId, SplitMetaData, SplitReader, }; pub struct PulsarSplitReader { @@ -88,17 +86,18 @@ fn parse_message_id(id: &str) -> Result { #[async_trait] impl SplitReader for PulsarSplitReader { type Properties = PulsarProperties; + type Split = PulsarSplit; async fn new( props: PulsarProperties, - splits: Vec, + splits: Vec, parser_config: ParserConfig, source_ctx: SourceContextRef, _columns: Option>, ) -> Result { ensure!(splits.len() == 1, "only support single split"); - let split = try_match_expand!(splits.into_iter().next().unwrap(), SplitImpl::Pulsar)?; - let pulsar = 
props.build_pulsar_client().await?; + let split = splits.into_iter().next().unwrap(); + let pulsar = props.common.build_client().await?; let topic = split.topic.to_string(); tracing::debug!("creating consumer for pulsar split topic {}", topic,); diff --git a/src/connector/src/source/pulsar/split.rs b/src/connector/src/source/pulsar/split.rs index 3f7f1424f0ea3..5e546c0473519 100644 --- a/src/connector/src/source/pulsar/split.rs +++ b/src/connector/src/source/pulsar/split.rs @@ -26,19 +26,6 @@ pub struct PulsarSplit { pub(crate) start_offset: PulsarEnumeratorOffset, } -impl PulsarSplit { - pub fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { - let start_offset = if start_offset.is_empty() { - PulsarEnumeratorOffset::Earliest - } else { - PulsarEnumeratorOffset::MessageId(start_offset) - }; - - self.start_offset = start_offset; - Ok(()) - } -} - impl SplitMetaData for PulsarSplit { fn id(&self) -> SplitId { // TODO: should avoid constructing a string every time @@ -52,4 +39,15 @@ impl SplitMetaData for PulsarSplit { fn encode_to_json(&self) -> JsonbVal { serde_json::to_value(self.clone()).unwrap().into() } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + let start_offset = if start_offset.is_empty() { + PulsarEnumeratorOffset::Earliest + } else { + PulsarEnumeratorOffset::MessageId(start_offset) + }; + + self.start_offset = start_offset; + Ok(()) + } } diff --git a/src/connector/src/source/test_source.rs b/src/connector/src/source/test_source.rs new file mode 100644 index 0000000000000..743ae3b179427 --- /dev/null +++ b/src/connector/src/source/test_source.rs @@ -0,0 +1,239 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::sync::{Arc, OnceLock}; + +use anyhow::anyhow; +use async_trait::async_trait; +use parking_lot::Mutex; +use risingwave_common::types::JsonbVal; +use serde_derive::{Deserialize, Serialize}; + +use crate::parser::ParserConfig; +use crate::source::{ + BoxSourceWithStateStream, Column, SourceContextRef, SourceEnumeratorContextRef, + SourceProperties, SplitEnumerator, SplitId, SplitMetaData, SplitReader, TryFromHashmap, +}; + +pub type BoxListSplits = Box< + dyn FnMut( + TestSourceProperties, + SourceEnumeratorContextRef, + ) -> anyhow::Result> + + Send + + 'static, +>; + +pub type BoxIntoSourceStream = Box< + dyn FnMut( + TestSourceProperties, + Vec, + ParserConfig, + SourceContextRef, + Option>, + ) -> BoxSourceWithStateStream + + Send + + 'static, +>; + +pub struct BoxSource { + list_split: BoxListSplits, + into_source_stream: BoxIntoSourceStream, +} + +impl BoxSource { + pub fn new( + list_splits: impl FnMut( + TestSourceProperties, + SourceEnumeratorContextRef, + ) -> anyhow::Result> + + Send + + 'static, + into_source_stream: impl FnMut( + TestSourceProperties, + Vec, + ParserConfig, + SourceContextRef, + Option>, + ) -> BoxSourceWithStateStream + + Send + + 'static, + ) -> BoxSource { + BoxSource { + list_split: Box::new(list_splits), + into_source_stream: Box::new(into_source_stream), + } + } +} + +struct TestSourceRegistry { + box_source: Arc>>, +} + +impl TestSourceRegistry { + fn new() -> Self { + TestSourceRegistry { + box_source: Arc::new(Mutex::new(None)), + } + } +} + +fn get_registry() -> 
&'static TestSourceRegistry { + static GLOBAL_REGISTRY: OnceLock = OnceLock::new(); + GLOBAL_REGISTRY.get_or_init(TestSourceRegistry::new) +} + +pub struct TestSourceRegistryGuard; + +impl Drop for TestSourceRegistryGuard { + fn drop(&mut self) { + assert!(get_registry().box_source.lock().take().is_some()); + } +} + +pub fn registry_test_source(box_source: BoxSource) -> TestSourceRegistryGuard { + assert!(get_registry() + .box_source + .lock() + .replace(box_source) + .is_none()); + TestSourceRegistryGuard +} + +pub const TEST_CONNECTOR: &str = "test"; + +#[derive(Clone, Debug)] +pub struct TestSourceProperties { + properties: HashMap, +} + +impl TryFromHashmap for TestSourceProperties { + fn try_from_hashmap(props: HashMap) -> anyhow::Result { + if cfg!(any(madsim, test)) { + Ok(TestSourceProperties { properties: props }) + } else { + Err(anyhow!("test source only available at test")) + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct TestSourceSplit { + pub id: SplitId, + pub properties: HashMap, + pub offset: String, +} + +impl SplitMetaData for TestSourceSplit { + fn id(&self) -> SplitId { + self.id.clone() + } + + fn encode_to_json(&self) -> JsonbVal { + serde_json::to_value(self.clone()).unwrap().into() + } + + fn restore_from_json(value: JsonbVal) -> anyhow::Result { + serde_json::from_value(value.take()).map_err(|e| anyhow!(e)) + } + + fn update_with_offset(&mut self, start_offset: String) -> anyhow::Result<()> { + self.offset = start_offset; + Ok(()) + } +} + +pub struct TestSourceSplitEnumerator { + properties: TestSourceProperties, + context: SourceEnumeratorContextRef, +} + +#[async_trait] +impl SplitEnumerator for TestSourceSplitEnumerator { + type Properties = TestSourceProperties; + type Split = TestSourceSplit; + + async fn new( + properties: Self::Properties, + context: SourceEnumeratorContextRef, + ) -> anyhow::Result { + Ok(Self { + properties, + context, + }) + } + + async fn list_splits(&mut self) -> 
anyhow::Result> { + (get_registry() + .box_source + .lock() + .as_mut() + .expect("should have init") + .list_split)(self.properties.clone(), self.context.clone()) + } +} + +pub struct TestSourceSplitReader { + properties: TestSourceProperties, + state: Vec, + parser_config: ParserConfig, + source_ctx: SourceContextRef, + columns: Option>, +} + +#[async_trait] +impl SplitReader for TestSourceSplitReader { + type Properties = TestSourceProperties; + type Split = TestSourceSplit; + + async fn new( + properties: Self::Properties, + state: Vec, + parser_config: ParserConfig, + source_ctx: SourceContextRef, + columns: Option>, + ) -> anyhow::Result { + Ok(Self { + properties, + state, + parser_config, + source_ctx, + columns, + }) + } + + fn into_stream(self) -> BoxSourceWithStateStream { + (get_registry() + .box_source + .lock() + .as_mut() + .expect("should have init") + .into_source_stream)( + self.properties, + self.state, + self.parser_config, + self.source_ctx, + self.columns, + ) + } +} + +impl SourceProperties for TestSourceProperties { + type Split = TestSourceSplit; + type SplitEnumerator = TestSourceSplitEnumerator; + type SplitReader = TestSourceSplitReader; + + const SOURCE_NAME: &'static str = TEST_CONNECTOR; +} diff --git a/src/ctl/Cargo.toml b/src/ctl/Cargo.toml index 50a6843b14013..061783b3d6dfb 100644 --- a/src/ctl/Cargo.toml +++ b/src/ctl/Cargo.toml @@ -23,7 +23,7 @@ etcd-client = { workspace = true } futures = { version = "0.3", default-features = false, features = ["alloc"] } inquire = "0.6.2" itertools = "0.11" -regex = "1.9.5" +regex = "1.10.0" risingwave_common = { workspace = true } risingwave_connector = { workspace = true } risingwave_frontend = { workspace = true } diff --git a/src/ctl/src/cmd_impl.rs b/src/ctl/src/cmd_impl.rs index 97a8f2c24a3f4..83fb9402e0c7a 100644 --- a/src/ctl/src/cmd_impl.rs +++ b/src/ctl/src/cmd_impl.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the 
License. +pub mod await_tree; pub mod bench; pub mod compute; pub mod debug; @@ -20,4 +21,3 @@ pub mod meta; pub mod profile; pub mod scale; pub mod table; -pub mod trace; diff --git a/src/ctl/src/cmd_impl/trace.rs b/src/ctl/src/cmd_impl/await_tree.rs similarity index 50% rename from src/ctl/src/cmd_impl/trace.rs rename to src/ctl/src/cmd_impl/await_tree.rs index dec02108ce115..f01b07b6cd105 100644 --- a/src/ctl/src/cmd_impl/trace.rs +++ b/src/ctl/src/cmd_impl/await_tree.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::BTreeMap; - use risingwave_common::util::addr::HostAddr; use risingwave_pb::common::WorkerType; use risingwave_pb::monitor_service::StackTraceResponse; @@ -21,66 +19,62 @@ use risingwave_rpc_client::{CompactorClient, ComputeClientPool}; use crate::CtlContext; -pub async fn trace(context: &CtlContext) -> anyhow::Result<()> { - let meta_client = context.meta_client().await?; +fn merge(a: &mut StackTraceResponse, b: StackTraceResponse) { + a.actor_traces.extend(b.actor_traces); + a.rpc_traces.extend(b.rpc_traces); + a.compaction_task_traces.extend(b.compaction_task_traces); +} - let workers = meta_client.get_cluster_info().await?.worker_nodes; - let compute_nodes = workers - .into_iter() - .filter(|w| w.r#type() == WorkerType::ComputeNode); +pub async fn dump(context: &CtlContext) -> anyhow::Result<()> { + let mut all = Default::default(); - let clients = ComputeClientPool::default(); + let meta_client = context.meta_client().await?; - let mut all_actor_traces = BTreeMap::new(); - let mut all_rpc_traces = BTreeMap::new(); + let compute_nodes = meta_client + .list_worker_nodes(WorkerType::ComputeNode) + .await?; + let clients = ComputeClientPool::default(); // FIXME: the compute node may not be accessible directly from risectl, we may let the meta // service collect the reports from all compute nodes in the future. 
for cn in compute_nodes { let client = clients.get(&cn).await?; - let StackTraceResponse { - actor_traces, - rpc_traces, - .. - } = client.stack_trace().await?; - - all_actor_traces.extend(actor_traces); - all_rpc_traces.extend(rpc_traces.into_iter().map(|(k, v)| { - ( - format!("{} ({})", HostAddr::from(cn.get_host().unwrap()), k), - v, - ) - })); - } - - if all_actor_traces.is_empty() && all_rpc_traces.is_empty() { - println!("No traces found. No actors are running, or `--async-stack-trace` not set?"); - } else { - println!("--- Actor Traces ---"); - for (key, trace) in all_actor_traces { - println!(">> Actor {key}\n{trace}"); - } - println!("--- RPC Traces ---"); - for (key, trace) in all_rpc_traces { - println!(">> RPC {key}\n{trace}"); - } + let response = client.stack_trace().await?; + merge(&mut all, response); } let compactor_nodes = meta_client.list_worker_nodes(WorkerType::Compactor).await?; - let mut all_compaction_task_traces = BTreeMap::new(); + for compactor in compactor_nodes { let addr: HostAddr = compactor.get_host().unwrap().into(); let client = CompactorClient::new(addr).await?; - let StackTraceResponse { - compaction_task_traces, - .. - } = client.stack_trace().await?; - all_compaction_task_traces.extend(compaction_task_traces); + let response = client.stack_trace().await?; + merge(&mut all, response); } - if !all_compaction_task_traces.is_empty() { - println!("--- Compactor Traces ---"); - for (key, trace) in all_compaction_task_traces { - println!(">> Compaction Task {key}\n{trace}"); + + if all.actor_traces.is_empty() + && all.rpc_traces.is_empty() + && all.compaction_task_traces.is_empty() + { + println!("No traces found. 
No actors are running, or `--async-stack-trace` not set?"); + } else { + if !all.actor_traces.is_empty() { + println!("--- Actor Traces ---"); + for (key, trace) in all.actor_traces { + println!(">> Actor {key}\n{trace}"); + } + } + if !all.rpc_traces.is_empty() { + println!("\n\n--- RPC Traces ---"); + for (key, trace) in all.rpc_traces { + println!(">> RPC {key}\n{trace}"); + } + } + if !all.compaction_task_traces.is_empty() { + println!("\n\n--- Compactor Traces ---"); + for (key, trace) in all.compaction_task_traces { + println!(">> Compaction Task {key}\n{trace}"); + } } } diff --git a/src/ctl/src/cmd_impl/bench.rs b/src/ctl/src/cmd_impl/bench.rs index 823febed50a43..7dfd798a2b5be 100644 --- a/src/ctl/src/cmd_impl/bench.rs +++ b/src/ctl/src/cmd_impl/bench.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::Bound; +use std::ops::Bound::Unbounded; use std::sync::atomic::AtomicU64; use std::sync::Arc; use std::time::Instant; @@ -20,6 +22,7 @@ use anyhow::Result; use clap::Subcommand; use futures::future::try_join_all; use futures::{pin_mut, Future, StreamExt}; +use risingwave_common::row::{self, OwnedRow}; use risingwave_common::util::epoch::EpochPair; use risingwave_storage::store::PrefetchOptions; use size::Size; @@ -102,8 +105,14 @@ pub async fn do_bench(context: &CtlContext, cmd: BenchCommands) -> Result<()> { tb }; loop { + let sub_range: &(Bound, Bound) = + &(Unbounded, Unbounded); let stream = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await?; pin_mut!(stream); iter_cnt.fetch_add(1, std::sync::atomic::Ordering::Relaxed); diff --git a/src/ctl/src/cmd_impl/hummock/compaction_group.rs b/src/ctl/src/cmd_impl/hummock/compaction_group.rs index 75a9884aece75..d4101a7a958db 100644 --- a/src/ctl/src/cmd_impl/hummock/compaction_group.rs +++ 
b/src/ctl/src/cmd_impl/hummock/compaction_group.rs @@ -60,6 +60,8 @@ pub fn build_compaction_config_vec( max_space_reclaim_bytes: Option, level0_max_compact_file_number: Option, level0_overlapping_sub_level_compact_level_count: Option, + enable_emergency_picker: Option, + tombstone_reclaim_ratio: Option, ) -> Vec { let mut configs = vec![]; if let Some(c) = max_bytes_for_level_base { @@ -101,6 +103,13 @@ pub fn build_compaction_config_vec( if let Some(c) = level0_overlapping_sub_level_compact_level_count { configs.push(MutableConfig::Level0OverlappingSubLevelCompactLevelCount(c)) } + if let Some(c) = enable_emergency_picker { + configs.push(MutableConfig::EnableEmergencyPicker(c)) + } + if let Some(c) = tombstone_reclaim_ratio { + configs.push(MutableConfig::TombstoneReclaimRatio(c)) + } + configs } @@ -220,3 +229,34 @@ pub async fn list_compaction_status(context: &CtlContext, verbose: bool) -> anyh } Ok(()) } + +pub async fn get_compaction_score( + context: &CtlContext, + id: CompactionGroupId, +) -> anyhow::Result<()> { + let meta_client = context.meta_client().await?; + let scores = meta_client.get_compaction_score(id).await?; + let mut table = Table::new(); + table.set_header({ + let mut row = Row::new(); + row.add_cell("Select Level".into()); + row.add_cell("Target Level".into()); + row.add_cell("Type".into()); + row.add_cell("Score".into()); + row + }); + for s in scores.into_iter().sorted_by(|a, b| { + a.select_level + .cmp(&b.select_level) + .then_with(|| a.target_level.cmp(&b.target_level)) + }) { + let mut row = Row::new(); + row.add_cell(s.select_level.into()); + row.add_cell(s.target_level.into()); + row.add_cell(s.picker_type.into()); + row.add_cell(s.score.into()); + table.add_row(row); + } + println!("{table}"); + Ok(()) +} diff --git a/src/ctl/src/cmd_impl/hummock/list_version.rs b/src/ctl/src/cmd_impl/hummock/list_version.rs index 6935dcf604142..3973860d9e30e 100644 --- a/src/ctl/src/cmd_impl/hummock/list_version.rs +++ 
b/src/ctl/src/cmd_impl/hummock/list_version.rs @@ -148,3 +148,9 @@ pub async fn list_pinned_snapshots(context: &CtlContext) -> anyhow::Result<()> { } Ok(()) } + +pub async fn rebuild_table_stats(context: &CtlContext) -> anyhow::Result<()> { + let meta_client = context.meta_client().await?; + meta_client.risectl_rebuild_table_stats().await?; + Ok(()) +} diff --git a/src/ctl/src/cmd_impl/hummock/sst_dump.rs b/src/ctl/src/cmd_impl/hummock/sst_dump.rs index 8208d17b5abe3..4f957cd508862 100644 --- a/src/ctl/src/cmd_impl/hummock/sst_dump.rs +++ b/src/ctl/src/cmd_impl/hummock/sst_dump.rs @@ -31,7 +31,7 @@ use risingwave_frontend::TableCatalog; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::HummockVersionExt; use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::HummockSstableObjectId; -use risingwave_object_store::object::{BlockLocation, ObjectMetadata, ObjectStoreImpl}; +use risingwave_object_store::object::{ObjectMetadata, ObjectStoreImpl}; use risingwave_pb::hummock::{Level, SstableInfo}; use risingwave_rpc_client::MetaClient; use risingwave_storage::hummock::value::HummockValue; @@ -176,20 +176,15 @@ async fn get_meta_offset_from_object( obj: &ObjectMetadata, obj_store: &ObjectStoreImpl, ) -> anyhow::Result { - let meta_offset_loc = BlockLocation { - offset: obj.total_size - - ( - // version, magic - 2 * std::mem::size_of::() + - // footer, checksum - 2 * std::mem::size_of::() - ), - size: std::mem::size_of::(), - }; - Ok(obj_store - .read(&obj.key, Some(meta_offset_loc)) - .await? 
- .get_u64_le()) + let start = obj.total_size + - ( + // version, magic + 2 * std::mem::size_of::() + + // footer, checksum + 2 * std::mem::size_of::() + ); + let end = start + std::mem::size_of::(); + Ok(obj_store.read(&obj.key, start..end).await?.get_u64_le()) } pub async fn sst_dump_via_sstable_store( @@ -281,11 +276,8 @@ async fn print_block( // Retrieve encoded block data in bytes let store = sstable_store.store(); - let block_loc = BlockLocation { - offset: block_meta.offset as usize, - size: block_meta.len as usize, - }; - let block_data = store.read(&data_path, Some(block_loc)).await?; + let range = block_meta.offset as usize..block_meta.offset as usize + block_meta.len as usize; + let block_data = store.read(&data_path, range).await?; // Retrieve checksum and compression algorithm used from the encoded block data let len = block_data.len(); diff --git a/src/ctl/src/cmd_impl/meta/backup_meta.rs b/src/ctl/src/cmd_impl/meta/backup_meta.rs index 77c7f0edb7ca2..3238e22b35050 100644 --- a/src/ctl/src/cmd_impl/meta/backup_meta.rs +++ b/src/ctl/src/cmd_impl/meta/backup_meta.rs @@ -22,21 +22,33 @@ pub async fn backup_meta(context: &CtlContext) -> anyhow::Result<()> { let meta_client = context.meta_client().await?; let job_id = meta_client.backup_meta().await?; loop { - let job_status = meta_client.get_backup_job_status(job_id).await?; + let (job_status, message) = meta_client.get_backup_job_status(job_id).await?; match job_status { BackupJobStatus::Running => { - tracing::info!("backup job is still running: job {}", job_id); + tracing::info!("backup job is still running: job {}, {}", job_id, message); tokio::time::sleep(Duration::from_secs(1)).await; } BackupJobStatus::Succeeded => { + tracing::info!("backup job succeeded: job {}, {}", job_id, message); break; } - _ => { - return Err(anyhow::anyhow!("backup job failed: job {}", job_id)); + BackupJobStatus::NotFound => { + return Err(anyhow::anyhow!( + "backup job status not found: job {}, {}", + job_id, + message + 
)); } + BackupJobStatus::Failed => { + return Err(anyhow::anyhow!( + "backup job failed: job {}, {}", + job_id, + message + )); + } + _ => unreachable!("unknown backup job status"), } } - tracing::info!("backup job succeeded: job {}", job_id); Ok(()) } diff --git a/src/ctl/src/cmd_impl/profile.rs b/src/ctl/src/cmd_impl/profile.rs index 0069adee37687..fc487f1d2c17d 100644 --- a/src/ctl/src/cmd_impl/profile.rs +++ b/src/ctl/src/cmd_impl/profile.rs @@ -82,7 +82,8 @@ pub async fn cpu_profile(context: &CtlContext, sleep_s: u64) -> anyhow::Result<( Ok(()) } -pub async fn heap_profile(context: &CtlContext, dir: String) -> anyhow::Result<()> { +pub async fn heap_profile(context: &CtlContext, dir: Option) -> anyhow::Result<()> { + let dir = dir.unwrap_or_default(); let meta_client = context.meta_client().await?; let workers = meta_client.get_cluster_info().await?.worker_nodes; diff --git a/src/ctl/src/common/hummock_service.rs b/src/ctl/src/common/hummock_service.rs index fe3ffefbbdcc6..8b86892781d52 100644 --- a/src/ctl/src/common/hummock_service.rs +++ b/src/ctl/src/common/hummock_service.rs @@ -169,6 +169,7 @@ For `./risedev apply-compose-deploy` users, 0, FileCache::none(), FileCache::none(), + None, ))) } } diff --git a/src/ctl/src/lib.rs b/src/ctl/src/lib.rs index adcb6cf3b3472..cedcf4922e404 100644 --- a/src/ctl/src/lib.rs +++ b/src/ctl/src/lib.rs @@ -19,6 +19,7 @@ use anyhow::Result; use clap::{Parser, Subcommand}; use cmd_impl::bench::BenchCommands; use cmd_impl::hummock::SstDumpArgs; +use risingwave_meta::backup_restore::RestoreOpts; use risingwave_pb::meta::update_worker_node_schedulability_request::Schedulability; use crate::cmd_impl::hummock::{ @@ -67,8 +68,9 @@ enum Commands { /// Commands for Debug #[clap(subcommand)] Debug(DebugCommands), - /// Commands for tracing the compute nodes - Trace, + /// Dump the await-tree of compute nodes and compactors + #[clap(visible_alias("trace"))] + AwaitTree, // TODO(yuhao): profile other nodes /// Commands for profilng 
the compute nodes #[clap(subcommand)] @@ -220,6 +222,10 @@ enum HummockCommands { level0_max_compact_file_number: Option, #[clap(long)] level0_overlapping_sub_level_compact_level_count: Option, + #[clap(long)] + enable_emergency_picker: Option, + #[clap(long)] + tombstone_reclaim_ratio: Option, }, /// Split given compaction group into two. Moves the given tables to the new group. SplitCompactionGroup { @@ -239,8 +245,14 @@ enum HummockCommands { #[clap(short, long = "verbose", default_value_t = false)] verbose: bool, }, + GetCompactionScore { + #[clap(long)] + compaction_group_id: u64, + }, /// Validate the current HummockVersion. ValidateVersion, + /// Rebuild table stats + RebuildTableStats, } #[derive(Subcommand)] @@ -310,7 +322,7 @@ pub struct ScaleCommon { /// Specify the fragment ids that need to be scheduled. /// empty by default, which means all fragments will be scheduled - #[clap(long)] + #[clap(long, value_delimiter = ',')] fragments: Option>, } @@ -323,13 +335,14 @@ pub struct ScaleVerticalCommands { /// supported #[clap( long, + required = true, value_delimiter = ',', value_name = "all or worker_id or worker_host, ..." )] workers: Option>, /// The target parallelism per worker, requires `workers` to be set. 
- #[clap(long, requires = "workers")] + #[clap(long, required = true)] target_parallelism_per_worker: Option, } @@ -416,6 +429,11 @@ enum MetaCommands { }, /// backup meta by taking a meta snapshot BackupMeta, + /// restore meta by recovering from a meta snapshot + RestoreMeta { + #[command(flatten)] + opts: RestoreOpts, + }, /// delete meta snapshots DeleteMetaSnapshots { snapshot_ids: Vec }, @@ -466,7 +484,7 @@ pub enum ProfileCommands { Heap { /// The output directory of the dumped file #[clap(long = "dir")] - dir: String, + dir: Option, }, } @@ -549,6 +567,8 @@ pub async fn start_impl(opts: CliOpts, context: &CtlContext) -> Result<()> { max_space_reclaim_bytes, level0_max_compact_file_number, level0_overlapping_sub_level_compact_level_count, + enable_emergency_picker, + tombstone_reclaim_ratio, }) => { cmd_impl::hummock::update_compaction_config( context, @@ -567,6 +587,8 @@ pub async fn start_impl(opts: CliOpts, context: &CtlContext) -> Result<()> { max_space_reclaim_bytes, level0_max_compact_file_number, level0_overlapping_sub_level_compact_level_count, + enable_emergency_picker, + tombstone_reclaim_ratio, ), ) .await? @@ -590,9 +612,17 @@ pub async fn start_impl(opts: CliOpts, context: &CtlContext) -> Result<()> { Commands::Hummock(HummockCommands::ListCompactionStatus { verbose }) => { cmd_impl::hummock::list_compaction_status(context, verbose).await?; } + Commands::Hummock(HummockCommands::GetCompactionScore { + compaction_group_id, + }) => { + cmd_impl::hummock::get_compaction_score(context, compaction_group_id).await?; + } Commands::Hummock(HummockCommands::ValidateVersion) => { cmd_impl::hummock::validate_version(context).await?; } + Commands::Hummock(HummockCommands::RebuildTableStats) => { + cmd_impl::hummock::rebuild_table_stats(context).await?; + } Commands::Table(TableCommands::Scan { mv_name, data_dir }) => { cmd_impl::table::scan(context, mv_name, data_dir).await? 
} @@ -618,6 +648,9 @@ pub async fn start_impl(opts: CliOpts, context: &CtlContext) -> Result<()> { .await? } Commands::Meta(MetaCommands::BackupMeta) => cmd_impl::meta::backup_meta(context).await?, + Commands::Meta(MetaCommands::RestoreMeta { opts }) => { + risingwave_meta::backup_restore::restore(opts).await? + } Commands::Meta(MetaCommands::DeleteMetaSnapshots { snapshot_ids }) => { cmd_impl::meta::delete_meta_snapshots(context, &snapshot_ids).await? } @@ -635,7 +668,7 @@ pub async fn start_impl(opts: CliOpts, context: &CtlContext) -> Result<()> { Commands::Meta(MetaCommands::ValidateSource { props }) => { cmd_impl::meta::validate_source(context, props).await? } - Commands::Trace => cmd_impl::trace::trace(context).await?, + Commands::AwaitTree => cmd_impl::await_tree::dump(context).await?, Commands::Profile(ProfileCommands::Cpu { sleep }) => { cmd_impl::profile::cpu_profile(context, sleep).await? } diff --git a/src/expr/Cargo.toml b/src/expr/core/Cargo.toml similarity index 64% rename from src/expr/Cargo.toml rename to src/expr/core/Cargo.toml index 48d7ada49b192..9801c2767c18b 100644 --- a/src/expr/Cargo.toml +++ b/src/expr/core/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "risingwave_expr" +description = "Core expression traits and function registry." 
version = { workspace = true } edition = { workspace = true } homepage = { workspace = true } @@ -15,57 +16,48 @@ ignored = ["workspace-hack", "ctor"] normal = ["workspace-hack", "ctor"] [dependencies] -aho-corasick = "1" anyhow = "1" arrow-array = { workspace = true } arrow-schema = { workspace = true } async-trait = "0.1" -auto_enums = "0.8" +auto_impl = "1" await-tree = { workspace = true } -base64 = "0.21" -chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -chrono-tz = { version = "0.8", features = ["case-insensitive"] } +cfg-or-panic = "0.2" +chrono = { version = "0.4", default-features = false, features = [ + "clock", + "std", +] } ctor = "0.2" downcast-rs = "1.2" easy-ext = "1" either = "1" -futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = { workspace = true } futures-util = "0.3" -hex = "0.4.3" itertools = "0.11" -md5 = "0.7.0" num-traits = "0.2" parse-display = "0.8" paste = "1" -regex = "1" risingwave_common = { workspace = true } -risingwave_expr_macro = { path = "macro" } +risingwave_expr_macro = { path = "../macro" } risingwave_pb = { workspace = true } risingwave_udf = { workspace = true } -rust_decimal = { version = "1", features = ["db-postgres", "maths"] } -self_cell = "1.0.1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -sha1 = "0.10.5" -sha2 = "0.10.7" smallvec = "1" static_assertions = "1" thiserror = "1" -tokio = { version = "0.2", package = "madsim-tokio", features = ["rt", "rt-multi-thread", "sync", "macros", "time", "signal"] } +tokio = { version = "0.2", package = "madsim-tokio", features = [ + "rt-multi-thread", +] } tracing = "0.1" [target.'cfg(not(madsim))'.dependencies] -workspace-hack = { path = "../workspace-hack" } +workspace-hack = { path = "../../workspace-hack" } [dev-dependencies] -criterion = { workspace = true } expect-test = "1" -serde_json = "1" - -[[bench]] -name = "expr" -harness = false +tokio = { version = "0.2", 
package = "madsim-tokio", features = [ + "rt-multi-thread", + "macros", +] } [lints] workspace = true diff --git a/src/expr/src/agg/def.rs b/src/expr/core/src/aggregate/def.rs similarity index 89% rename from src/expr/src/agg/def.rs rename to src/expr/core/src/aggregate/def.rs index 570e910b68714..964ec46c9f9c4 100644 --- a/src/expr/src/agg/def.rs +++ b/src/expr/core/src/aggregate/def.rs @@ -20,9 +20,9 @@ use std::sync::Arc; use itertools::Itertools; use parse_display::{Display, FromStr}; use risingwave_common::bail; -use risingwave_common::types::DataType; +use risingwave_common::types::{DataType, Datum}; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_common::util::value_encoding; +use risingwave_common::util::value_encoding::DatumFromProtoExt; use risingwave_pb::expr::agg_call::PbType; use risingwave_pb::expr::{PbAggCall, PbInputRef}; @@ -68,7 +68,7 @@ impl AggCall { }) .collect(); let filter = match agg_call.filter { - Some(ref pb_filter) => Some(build_from_prost(pb_filter)?.into()), + Some(ref pb_filter) => Some(build_from_prost(pb_filter)?.into()), /* TODO: non-strict filter in streaming */ None => None, }; let direct_args = agg_call @@ -78,11 +78,7 @@ impl AggCall { let data_type = DataType::from(arg.get_type().unwrap()); LiteralExpression::new( data_type.clone(), - value_encoding::deserialize_datum( - arg.get_datum().unwrap().get_body().as_slice(), - &data_type, - ) - .unwrap(), + Datum::from_protobuf(arg.get_datum().unwrap(), &data_type).unwrap(), ) }) .collect_vec(); @@ -237,6 +233,9 @@ pub enum AggKind { PercentileDisc, Mode, Grouping, + + /// Return last seen one of the input values. 
+ InternalLastSeenValue, } impl AggKind { @@ -268,6 +267,7 @@ impl AggKind { PbType::PercentileDisc => Ok(AggKind::PercentileDisc), PbType::Mode => Ok(AggKind::Mode), PbType::Grouping => Ok(AggKind::Grouping), + PbType::InternalLastSeenValue => Ok(AggKind::InternalLastSeenValue), PbType::Unspecified => bail!("Unrecognized agg."), } } @@ -298,34 +298,27 @@ impl AggKind { Self::VarSamp => PbType::VarSamp, Self::PercentileCont => PbType::PercentileCont, Self::PercentileDisc => PbType::PercentileDisc, - Self::Grouping => PbType::Grouping, Self::Mode => PbType::Mode, + Self::Grouping => PbType::Grouping, + Self::InternalLastSeenValue => PbType::InternalLastSeenValue, } } } -/// Macros to generate match arms for [`AggKind`](crate::agg::AggKind). +/// Macros to generate match arms for [`AggKind`](crate::aggregate::AggKind). /// IMPORTANT: These macros must be carefully maintained especially when adding new -/// [`AggKind`](crate::agg::AggKind) variants. +/// [`AggKind`](crate::aggregate::AggKind) variants. pub mod agg_kinds { - /// [`AggKind`](crate::agg::AggKind)s that are currently not supported in streaming mode. + /// [`AggKind`](crate::aggregate::AggKind)s that are currently not supported in streaming mode. #[macro_export] macro_rules! unimplemented_in_stream { () => { - AggKind::BitAnd - | AggKind::BitOr - | AggKind::BoolAnd - | AggKind::BoolOr - | AggKind::JsonbAgg - | AggKind::JsonbObjectAgg - | AggKind::PercentileCont - | AggKind::PercentileDisc - | AggKind::Mode + AggKind::PercentileCont | AggKind::PercentileDisc | AggKind::Mode }; } pub use unimplemented_in_stream; - /// [`AggKind`](crate::agg::AggKind)s that should've been rewritten to other kinds. These kinds + /// [`AggKind`](crate::aggregate::AggKind)s that should've been rewritten to other kinds. These kinds /// should not appear when generating physical plan nodes. #[macro_export] macro_rules! 
rewritten { @@ -340,7 +333,7 @@ pub mod agg_kinds { } pub use rewritten; - /// [`AggKind`](crate::agg::AggKind)s of which the aggregate results are not affected by the + /// [`AggKind`](crate::aggregate::AggKind)s of which the aggregate results are not affected by the /// user given ORDER BY clause. #[macro_export] macro_rules! result_unaffected_by_order_by { @@ -365,7 +358,7 @@ pub mod agg_kinds { } pub use result_unaffected_by_order_by; - /// [`AggKind`](crate::agg::AggKind)s that must be called with ORDER BY clause. These are + /// [`AggKind`](crate::aggregate::AggKind)s that must be called with ORDER BY clause. These are /// slightly different from variants not in [`result_unaffected_by_order_by`], in that /// variants returned by this macro should be banned while the others should just be warned. #[macro_export] @@ -380,7 +373,7 @@ pub mod agg_kinds { } pub use must_have_order_by; - /// [`AggKind`](crate::agg::AggKind)s of which the aggregate results are not affected by the + /// [`AggKind`](crate::aggregate::AggKind)s of which the aggregate results are not affected by the /// user given DISTINCT keyword. #[macro_export] macro_rules! result_unaffected_by_distinct { @@ -396,7 +389,7 @@ pub mod agg_kinds { } pub use result_unaffected_by_distinct; - /// [`AggKind`](crate::agg::AggKind)s that are simply cannot 2-phased. + /// [`AggKind`](crate::aggregate::AggKind)s that are simply cannot 2-phased. #[macro_export] macro_rules! 
simply_cannot_two_phase { () => { @@ -410,11 +403,17 @@ pub mod agg_kinds { | AggKind::PercentileCont | AggKind::PercentileDisc | AggKind::Mode + // FIXME(wrj): move `BoolAnd` and `BoolOr` out + // after we support general merge in stateless_simple_agg + | AggKind::BoolAnd + | AggKind::BoolOr + | AggKind::BitAnd + | AggKind::BitOr }; } pub use simply_cannot_two_phase; - /// [`AggKind`](crate::agg::AggKind)s that are implemented with a single value state (so-called + /// [`AggKind`](crate::aggregate::AggKind)s that are implemented with a single value state (so-called /// stateless). #[macro_export] macro_rules! single_value_state { @@ -422,13 +421,18 @@ pub mod agg_kinds { AggKind::Sum | AggKind::Sum0 | AggKind::Count + | AggKind::BitAnd + | AggKind::BitOr | AggKind::BitXor + | AggKind::BoolAnd + | AggKind::BoolOr | AggKind::ApproxCountDistinct + | AggKind::InternalLastSeenValue }; } pub use single_value_state; - /// [`AggKind`](crate::agg::AggKind)s that are implemented with a single value state (so-called + /// [`AggKind`](crate::aggregate::AggKind)s that are implemented with a single value state (so-called /// stateless) iff the input is append-only. #[macro_export] macro_rules! single_value_state_iff_in_append_only { @@ -452,14 +456,11 @@ impl AggKind { /// Get the total phase agg kind from the partial phase agg kind. 
pub fn partial_to_total(self) -> Option { match self { - AggKind::BitAnd - | AggKind::BitOr - | AggKind::BitXor - | AggKind::BoolAnd - | AggKind::BoolOr + AggKind::BitXor | AggKind::Min | AggKind::Max - | AggKind::Sum => Some(self), + | AggKind::Sum + | AggKind::InternalLastSeenValue => Some(self), AggKind::Sum0 | AggKind::Count => Some(AggKind::Sum0), agg_kinds::simply_cannot_two_phase!() => None, agg_kinds::rewritten!() => None, diff --git a/src/expr/src/agg/mod.rs b/src/expr/core/src/aggregate/mod.rs similarity index 71% rename from src/expr/src/agg/mod.rs rename to src/expr/core/src/aggregate/mod.rs index d6f05498b8527..74e3afdb0904c 100644 --- a/src/expr/src/agg/mod.rs +++ b/src/expr/core/src/aggregate/mod.rs @@ -16,26 +16,16 @@ use std::fmt::Debug; use std::ops::Range; use downcast_rs::{impl_downcast, Downcast}; +use itertools::Itertools; use risingwave_common::array::StreamChunk; use risingwave_common::estimate_size::EstimateSize; -use risingwave_common::types::{DataType, DataTypeName, Datum}; +use risingwave_common::types::{DataType, Datum}; -use crate::sig::FuncSigDebug; use crate::{ExprError, Result}; // aggregate definition mod def; -// concrete AggregateFunctions -mod approx_count_distinct; -mod array_agg; -mod general; -mod jsonb_agg; -mod mode; -mod percentile_cont; -mod percentile_disc; -mod string_agg; - pub use self::def::*; /// A trait over all aggregate functions. @@ -62,6 +52,19 @@ pub trait AggregateFunction: Send + Sync + 'static { /// Get aggregate result from the state. async fn get_result(&self, state: &AggregateState) -> Result; + + /// Encode the state into a datum that can be stored in state table. + fn encode_state(&self, state: &AggregateState) -> Result { + match state { + AggregateState::Datum(d) => Ok(d.clone()), + _ => panic!("cannot encode state"), + } + } + + /// Decode the state from a datum in state table. 
+ fn decode_state(&self, datum: Datum) -> Result { + Ok(AggregateState::Datum(datum)) + } } /// Intermediate state of an aggregate function. @@ -118,31 +121,36 @@ impl AggregateState { pub type BoxedAggregateFunction = Box; -/// Build an `AggregateFunction` from `AggCall`. +/// Build an append-only `Aggregator` from `AggCall`. +pub fn build_append_only(agg: &AggCall) -> Result { + build(agg, true) +} + +/// Build a retractable `Aggregator` from `AggCall`. +pub fn build_retractable(agg: &AggCall) -> Result { + build(agg, false) +} + +/// Build an `Aggregator` from `AggCall`. /// /// NOTE: This function ignores argument indices, `column_orders`, `filter` and `distinct` in /// `AggCall`. Such operations should be done in batch or streaming executors. -pub fn build(agg: &AggCall) -> Result { - // NOTE: The function signature is checked by `AggCall::infer_return_type` in the frontend. - - let args = (agg.args.arg_types().iter()) - .map(|t| t.into()) - .collect::>(); - let ret_type = (&agg.return_type).into(); - let desc = crate::sig::agg::AGG_FUNC_SIG_MAP - .get(agg.kind, &args, ret_type) +pub fn build(agg: &AggCall, append_only: bool) -> Result { + let desc = crate::sig::FUNCTION_REGISTRY + .get_aggregate( + agg.kind, + agg.args.arg_types(), + &agg.return_type, + append_only, + ) .ok_or_else(|| { ExprError::UnsupportedFunction(format!( - "{:?}", - FuncSigDebug { - func: agg.kind, - inputs_type: &args, - ret_type, - set_returning: false, - deprecated: false, - } + "{}({}) -> {}", + agg.kind.to_protobuf().as_str_name().to_ascii_lowercase(), + agg.args.arg_types().iter().format(", "), + agg.return_type, )) })?; - (desc.build)(agg) + desc.build_aggregate(agg) } diff --git a/src/expr/core/src/codegen.rs b/src/expr/core/src/codegen.rs new file mode 100644 index 0000000000000..51f9ca21d07b9 --- /dev/null +++ b/src/expr/core/src/codegen.rs @@ -0,0 +1,19 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not 
use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use async_trait::async_trait; +pub use ctor::ctor; +pub use futures_async_stream::try_stream; +pub use futures_util::stream::BoxStream; +pub use itertools::multizip; diff --git a/src/expr/src/error.rs b/src/expr/core/src/error.rs similarity index 86% rename from src/expr/src/error.rs rename to src/expr/core/src/error.rs index e39b1a09d8be6..ffb0e004c29e8 100644 --- a/src/expr/src/error.rs +++ b/src/expr/core/src/error.rs @@ -21,6 +21,20 @@ use thiserror::Error; /// A specialized Result type for expression operations. pub type Result = std::result::Result; +pub struct ContextUnavailable(&'static str); + +impl ContextUnavailable { + pub fn new(field: &'static str) -> Self { + Self(field) + } +} + +impl From for ExprError { + fn from(e: ContextUnavailable) -> Self { + ExprError::Context(e.0) + } +} + /// The error type for expression operations. 
#[derive(Error, Debug)] pub enum ExprError { @@ -73,14 +87,20 @@ pub enum ExprError { #[error("not a constant")] NotConstant, - #[error("Context not found")] - Context, + #[error("Context {0} not found")] + Context(&'static str), #[error("field name must not be null")] FieldNameNull, + + #[error("too few arguments for format()")] + TooFewArguments, + + #[error("invalid state: {0}")] + InvalidState(String), } -static_assertions::const_assert_eq!(std::mem::size_of::(), 40); +static_assertions::const_assert_eq!(std::mem::size_of::(), 48); impl From for RwError { fn from(s: ExprError) -> Self { @@ -88,15 +108,6 @@ impl From for RwError { } } -impl From for ExprError { - fn from(re: regex::Error) -> Self { - Self::InvalidParam { - name: "pattern", - reason: re.to_string().into(), - } - } -} - impl From for ExprError { fn from(e: chrono::ParseError) -> Self { Self::Parse(e.to_string().into()) diff --git a/src/expr/src/expr/expr_binary_nullable.rs b/src/expr/core/src/expr/and_or.rs similarity index 71% rename from src/expr/src/expr/expr_binary_nullable.rs rename to src/expr/core/src/expr/and_or.rs index 6864ef12d9508..29e9f2ae5c37f 100644 --- a/src/expr/src/expr/expr_binary_nullable.rs +++ b/src/expr/core/src/expr/and_or.rs @@ -17,14 +17,12 @@ use std::sync::Arc; use risingwave_common::array::*; -use risingwave_common::buffer::Bitmap; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum, Scalar}; use risingwave_expr_macro::build_function; use risingwave_pb::expr::expr_node::Type; use super::{BoxedExpression, Expression}; -use crate::vector_op::conjunction::{and, or}; use crate::Result; /// This is just an implementation detail. 
The semantic is not guaranteed at SQL level because @@ -43,32 +41,27 @@ impl Expression for BinaryShortCircuitExpression { } async fn eval(&self, input: &DataChunk) -> Result { - let left = self.expr_ia1.eval_checked(input).await?; + let left = self.expr_ia1.eval(input).await?; let left = left.as_bool(); - let res_vis: Vis = match self.expr_type { + let res_vis = match self.expr_type { // For `Or` operator, if res of left part is not null and is true, we do not want to // calculate right part because the result must be true. - Type::Or => (!left.to_bitmap()).into(), + Type::Or => !left.to_bitmap(), // For `And` operator, If res of left part is not null and is false, we do not want // to calculate right part because the result must be false. - Type::And => (left.data() | !left.null_bitmap()).into(), + Type::And => left.data() | !left.null_bitmap(), _ => unimplemented!(), }; - let new_vis = input.vis() & res_vis; + let new_vis = input.visibility() & res_vis; let mut input1 = input.clone(); - input1.set_vis(new_vis); + input1.set_visibility(new_vis); - let right = self.expr_ia2.eval_checked(&input1).await?; + let right = self.expr_ia2.eval(&input1).await?; let right = right.as_bool(); assert_eq!(left.len(), right.len()); - let mut bitmap = match input.visibility() { - Some(vis) => vis.clone(), - None => Bitmap::ones(input.capacity()), - }; - bitmap &= left.null_bitmap(); - bitmap &= right.null_bitmap(); + let mut bitmap = input.visibility() & left.null_bitmap() & right.null_bitmap(); let c = match self.expr_type { Type::Or => { @@ -123,11 +116,36 @@ fn build_or_expr(_: DataType, children: Vec) -> Result boolean")] +fn and(l: Option, r: Option) -> Option { + match (l, r) { + (Some(lb), Some(lr)) => Some(lb & lr), + (Some(true), None) => None, + (None, Some(true)) => None, + (Some(false), None) => Some(false), + (None, Some(false)) => Some(false), + (None, None) => None, + } +} + +// #[function("or(boolean, boolean) -> boolean")] +fn or(l: Option, r: Option) -> Option { 
+ match (l, r) { + (Some(lb), Some(lr)) => Some(lb | lr), + (Some(true), None) => Some(true), + (None, Some(true)) => Some(true), + (Some(false), None) => None, + (None, Some(false)) => None, + (None, None) => None, + } +} + #[cfg(test)] mod tests { use risingwave_common::array::DataChunk; use risingwave_common::test_prelude::DataChunkTestExt; + use super::*; use crate::expr::build_from_pretty; #[tokio::test] @@ -174,56 +192,23 @@ mod tests { assert_eq!(&result, target.column_at(0)); } - #[tokio::test] - async fn test_is_distinct_from() { - let (input, target) = DataChunk::from_pretty( - " - i i B - . . f - . 1 t - 1 . t - 2 2 f - 3 4 t - ", - ) - .split_column_at(2); - let expr = build_from_pretty("(is_distinct_from:boolean $0:int4 $1:int4)"); - let result = expr.eval(&input).await.unwrap(); - assert_eq!(&result, target.column_at(0)); + #[test] + fn test_and_() { + assert_eq!(Some(true), and(Some(true), Some(true))); + assert_eq!(Some(false), and(Some(true), Some(false))); + assert_eq!(Some(false), and(Some(false), Some(false))); + assert_eq!(None, and(Some(true), None)); + assert_eq!(Some(false), and(Some(false), None)); + assert_eq!(None, and(None, None)); } - #[tokio::test] - async fn test_is_not_distinct_from() { - let (input, target) = DataChunk::from_pretty( - " - i i B - . . t - . 1 f - 1 . f - 2 2 t - 3 4 f - ", - ) - .split_column_at(2); - let expr = build_from_pretty("(is_not_distinct_from:boolean $0:int4 $1:int4)"); - let result = expr.eval(&input).await.unwrap(); - assert_eq!(&result, target.column_at(0)); - } - - #[tokio::test] - async fn test_format_type() { - let (input, target) = DataChunk::from_pretty( - " - i i T - 16 0 boolean - 21 . smallint - 9527 0 ??? - . 0 . 
- ", - ) - .split_column_at(2); - let expr = build_from_pretty("(format_type:varchar $0:int4 $1:int4)"); - let result = expr.eval(&input).await.unwrap(); - assert_eq!(&result, target.column_at(0)); + #[test] + fn test_or_() { + assert_eq!(Some(true), or(Some(true), Some(true))); + assert_eq!(Some(true), or(Some(true), Some(false))); + assert_eq!(Some(false), or(Some(false), Some(false))); + assert_eq!(Some(true), or(Some(true), None)); + assert_eq!(None, or(Some(false), None)); + assert_eq!(None, or(None, None)); } } diff --git a/src/expr/src/expr/build.rs b/src/expr/core/src/expr/build.rs similarity index 53% rename from src/expr/src/expr/build.rs rename to src/expr/core/src/expr/build.rs index 55ec16b4fe0fe..7dffbcd42d66b 100644 --- a/src/expr/src/expr/build.rs +++ b/src/expr/core/src/expr/build.rs @@ -19,76 +19,176 @@ use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_pb::expr::expr_node::{PbType, RexNode}; use risingwave_pb::expr::ExprNode; -use super::expr_array_concat::ArrayConcatExpression; use super::expr_array_transform::ArrayTransformExpression; use super::expr_case::CaseExpression; use super::expr_coalesce::CoalesceExpression; -use super::expr_concat_ws::ConcatWsExpression; use super::expr_field::FieldExpression; use super::expr_in::InExpression; -use super::expr_nested_construct::NestedConstructExpression; -use super::expr_regexp::{RegexpMatchExpression, RegexpReplaceExpression}; use super::expr_some_all::SomeAllExpression; use super::expr_udf::UdfExpression; use super::expr_vnode::VnodeExpression; -use crate::expr::expr_proctime::ProcTimeExpression; -use crate::expr::expr_regexp_count::RegexpCountExpression; +use super::wrapper::checked::Checked; +use super::wrapper::non_strict::NonStrict; +use super::wrapper::EvalErrorReport; +use super::NonStrictExpression; use crate::expr::{ - BoxedExpression, Expression, InputRefExpression, LiteralExpression, TryFromExprNodeBoxed, + BoxedExpression, Expression, ExpressionBoxExt, 
InputRefExpression, LiteralExpression, }; -use crate::sig::func::FUNC_SIG_MAP; -use crate::sig::FuncSigDebug; +use crate::sig::FUNCTION_REGISTRY; use crate::{bail, ExprError, Result}; /// Build an expression from protobuf. pub fn build_from_prost(prost: &ExprNode) -> Result { - use PbType as E; - - let func_call = match prost.get_rex_node()? { - RexNode::InputRef(_) => return InputRefExpression::try_from_boxed(prost), - RexNode::Constant(_) => return LiteralExpression::try_from_boxed(prost), - RexNode::Udf(_) => return UdfExpression::try_from_boxed(prost), - RexNode::FuncCall(func_call) => func_call, - RexNode::Now(_) => unreachable!("now should not be built at backend"), - }; - - let func_type = prost.function_type(); - - match func_type { - // Dedicated types - E::All | E::Some => SomeAllExpression::try_from_boxed(prost), - E::In => InExpression::try_from_boxed(prost), - E::Case => CaseExpression::try_from_boxed(prost), - E::Coalesce => CoalesceExpression::try_from_boxed(prost), - E::ConcatWs => ConcatWsExpression::try_from_boxed(prost), - E::Field => FieldExpression::try_from_boxed(prost), - E::Array => NestedConstructExpression::try_from_boxed(prost), - E::Row => NestedConstructExpression::try_from_boxed(prost), - E::RegexpMatch => RegexpMatchExpression::try_from_boxed(prost), - E::RegexpReplace => RegexpReplaceExpression::try_from_boxed(prost), - E::RegexpCount => RegexpCountExpression::try_from_boxed(prost), - E::ArrayCat | E::ArrayAppend | E::ArrayPrepend => { - // Now we implement these three functions as a single expression for the - // sake of simplicity. If performance matters at some time, we can split - // the implementation to improve performance. - ArrayConcatExpression::try_from_boxed(prost) + ExprBuilder::new_strict().build(prost) +} + +/// Build an expression from protobuf in non-strict mode. 
+pub fn build_non_strict_from_prost( + prost: &ExprNode, + error_report: impl EvalErrorReport + 'static, +) -> Result { + ExprBuilder::new_non_strict(error_report) + .build(prost) + .map(NonStrictExpression) +} + +/// Build an expression from protobuf with possibly some wrappers attached to each node. +struct ExprBuilder { + /// The error reporting for non-strict mode. + /// + /// If set, each expression node will be wrapped with a [`NonStrict`] node that reports + /// errors to this error reporting. + error_report: Option, +} + +impl ExprBuilder { + /// Create a new builder in strict mode. + fn new_strict() -> Self { + Self { error_report: None } + } +} + +impl ExprBuilder +where + R: EvalErrorReport + 'static, +{ + /// Create a new builder in non-strict mode with the given error reporting. + fn new_non_strict(error_report: R) -> Self { + Self { + error_report: Some(error_report), } - E::Vnode => VnodeExpression::try_from_boxed(prost), - E::Proctime => ProcTimeExpression::try_from_boxed(prost), - - _ => { - let ret_type = DataType::from(prost.get_return_type().unwrap()); - let children = func_call - .get_children() - .iter() - .map(build_from_prost) - .try_collect()?; - - build_func(func_type, ret_type, children) + } + + /// Attach wrappers to an expression. + #[expect(clippy::let_and_return)] + fn wrap(&self, expr: impl Expression + 'static) -> BoxedExpression { + let checked = Checked(expr); + + let may_non_strict = if let Some(error_report) = &self.error_report { + NonStrict::new(checked, error_report.clone()).boxed() + } else { + checked.boxed() + }; + + may_non_strict + } + + /// Build an expression with `build_inner` and attach some wrappers. + fn build(&self, prost: &ExprNode) -> Result { + let expr = self.build_inner(prost)?; + Ok(self.wrap(expr)) + } + + /// Build an expression from protobuf. 
+ fn build_inner(&self, prost: &ExprNode) -> Result { + use PbType as E; + + let build_child = |prost: &'_ ExprNode| self.build(prost); + + match prost.get_rex_node()? { + RexNode::InputRef(_) => InputRefExpression::build_boxed(prost, build_child), + RexNode::Constant(_) => LiteralExpression::build_boxed(prost, build_child), + RexNode::Udf(_) => UdfExpression::build_boxed(prost, build_child), + + RexNode::FuncCall(_) => match prost.function_type() { + // Dedicated types + E::All | E::Some => SomeAllExpression::build_boxed(prost, build_child), + E::In => InExpression::build_boxed(prost, build_child), + E::Case => CaseExpression::build_boxed(prost, build_child), + E::Coalesce => CoalesceExpression::build_boxed(prost, build_child), + E::Field => FieldExpression::build_boxed(prost, build_child), + E::Vnode => VnodeExpression::build_boxed(prost, build_child), + + // General types, lookup in the function signature map + _ => FuncCallBuilder::build_boxed(prost, build_child), + }, + + RexNode::Now(_) => unreachable!("now should not be built at backend"), } } } +/// Manually build the expression `Self` from protobuf. +pub(crate) trait Build: Expression + Sized { + /// Build the expression `Self` from protobuf. + /// + /// To build children, call `build_child` on each child instead of [`build_from_prost`]. + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result; + + /// Build the expression `Self` from protobuf for test, where each child is built with + /// [`build_from_prost`]. + fn build_for_test(prost: &ExprNode) -> Result { + Self::build(prost, build_from_prost) + } +} + +/// Manually build a boxed expression from protobuf. +pub(crate) trait BuildBoxed: 'static { + /// Build a boxed expression from protobuf. + fn build_boxed( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result; +} + +/// Implement [`BuildBoxed`] for all expressions that implement [`Build`]. 
+impl BuildBoxed for E { + fn build_boxed( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { + Self::build(prost, build_child).map(ExpressionBoxExt::boxed) + } +} + +/// Build a function call expression from protobuf with [`build_func`]. +struct FuncCallBuilder; + +impl BuildBoxed for FuncCallBuilder { + fn build_boxed( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { + let func_type = prost.function_type(); + let ret_type = DataType::from(prost.get_return_type().unwrap()); + let func_call = prost + .get_rex_node()? + .as_func_call() + .expect("not a func call"); + + let children = func_call + .get_children() + .iter() + .map(build_child) + .try_collect()?; + + build_func(func_type, ret_type, children) + } +} + /// Build an expression in `FuncCall` variant. pub fn build_func( func: PbType, @@ -101,25 +201,34 @@ pub fn build_func( return Ok(ArrayTransformExpression { array, lambda }.boxed()); } - let args = children - .iter() - .map(|c| c.return_type().into()) - .collect_vec(); - let desc = FUNC_SIG_MAP - .get(func, &args, (&ret_type).into()) + let args = children.iter().map(|c| c.return_type()).collect_vec(); + let desc = FUNCTION_REGISTRY + .get(func, &args, &ret_type) .ok_or_else(|| { ExprError::UnsupportedFunction(format!( - "{:?}", - FuncSigDebug { - func: func.as_str_name(), - inputs_type: &args, - ret_type: (&ret_type).into(), - set_returning: false, - deprecated: false, - } + "{}({}) -> {}", + func.as_str_name().to_ascii_lowercase(), + args.iter().format(", "), + ret_type, )) })?; - (desc.build)(ret_type, children) + desc.build_scalar(ret_type, children) +} + +/// Build an expression in `FuncCall` variant in non-strict mode. +/// +/// Note: This is a workaround, and only the root node are wrappedin non-strict mode. +/// Prefer [`build_non_strict_from_prost`] if possible. 
+pub fn build_func_non_strict( + func: PbType, + ret_type: DataType, + children: Vec, + error_report: impl EvalErrorReport + 'static, +) -> Result { + let expr = build_func(func, ret_type, children)?; + let wrapped = NonStrictExpression(ExprBuilder::new_non_strict(error_report).wrap(expr)); + + Ok(wrapped) } pub(super) fn get_children_and_return_type(prost: &ExprNode) -> Result<(&[ExprNode], DataType)> { @@ -134,7 +243,8 @@ pub(super) fn get_children_and_return_type(prost: &ExprNode) -> Result<(&[ExprNo /// Build an expression from a string. /// /// # Example -/// ``` +/// +/// ```ignore /// # use risingwave_expr::expr::build_from_pretty; /// build_from_pretty("42:int2"); // literal /// build_from_pretty("$0:int8"); // inputref diff --git a/src/expr/src/expr/expr_array_transform.rs b/src/expr/core/src/expr/expr_array_transform.rs similarity index 90% rename from src/expr/src/expr/expr_array_transform.rs rename to src/expr/core/src/expr/expr_array_transform.rs index 1f5d67397b332..016fad81074dd 100644 --- a/src/expr/src/expr/expr_array_transform.rs +++ b/src/expr/core/src/expr/expr_array_transform.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use async_trait::async_trait; -use risingwave_common::array::{ArrayRef, DataChunk, Vis}; +use risingwave_common::array::{ArrayRef, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum, ListValue, ScalarImpl}; @@ -35,13 +35,12 @@ impl Expression for ArrayTransformExpression { } async fn eval(&self, input: &DataChunk) -> Result { - let lambda_input = self.array.eval_checked(input).await?; + let lambda_input = self.array.eval(input).await?; let lambda_input = Arc::unwrap_or_clone(lambda_input).into_list(); let new_list = lambda_input .map_inner(|flatten_input| async move { let flatten_len = flatten_input.len(); - let chunk = - DataChunk::new(vec![Arc::new(flatten_input)], Vis::Compact(flatten_len)); + let chunk = DataChunk::new(vec![Arc::new(flatten_input)], flatten_len); 
self.lambda.eval(&chunk).await.map(Arc::unwrap_or_clone) }) .await?; diff --git a/src/expr/src/expr/expr_case.rs b/src/expr/core/src/expr/expr_case.rs similarity index 52% rename from src/expr/src/expr/expr_case.rs rename to src/expr/core/src/expr/expr_case.rs index 894426247043a..49f11298d3e04 100644 --- a/src/expr/src/expr/expr_case.rs +++ b/src/expr/core/src/expr/expr_case.rs @@ -14,15 +14,16 @@ use std::sync::Arc; -use risingwave_common::array::{ArrayRef, DataChunk, Vis}; +use risingwave_common::array::{ArrayRef, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum}; use risingwave_common::{bail, ensure}; use risingwave_pb::expr::expr_node::{PbType, RexNode}; use risingwave_pb::expr::ExprNode; -use crate::expr::{build_from_prost, BoxedExpression, Expression}; -use crate::{ExprError, Result}; +use super::Build; +use crate::expr::{BoxedExpression, Expression}; +use crate::Result; #[derive(Debug)] pub struct WhenClause { @@ -64,25 +65,20 @@ impl Expression for CaseExpression { let when_len = self.when_clauses.len(); let mut result_array = Vec::with_capacity(when_len + 1); for (when_idx, WhenClause { when, then }) in self.when_clauses.iter().enumerate() { - let calc_then_vis: Vis = when - .eval_checked(&input) - .await? 
- .as_bool() - .to_bitmap() - .into(); - let input_vis = input.vis().clone(); - input.set_vis(calc_then_vis.clone()); - let then_res = then.eval_checked(&input).await?; + let calc_then_vis = when.eval(&input).await?.as_bool().to_bitmap(); + let input_vis = input.visibility().clone(); + input.set_visibility(calc_then_vis.clone()); + let then_res = then.eval(&input).await?; calc_then_vis .iter_ones() .for_each(|pos| selection[pos] = Some(when_idx)); - input.set_vis(&input_vis & (!&calc_then_vis)); + input.set_visibility(&input_vis & (!calc_then_vis)); result_array.push(then_res); } if let Some(ref else_expr) = self.else_clause { - let else_res = else_expr.eval_checked(&input).await?; + let else_res = else_expr.eval(&input).await?; input - .vis() + .visibility() .iter_ones() .for_each(|pos| selection[pos] = Some(when_len)); result_array.push(else_res); @@ -112,10 +108,11 @@ impl Expression for CaseExpression { } } -impl<'a> TryFrom<&'a ExprNode> for CaseExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for CaseExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { ensure!(prost.get_function_type().unwrap() == PbType::Case); let ret_type = DataType::from(prost.get_return_type().unwrap()); @@ -126,7 +123,7 @@ impl<'a> TryFrom<&'a ExprNode> for CaseExpression { // children: (when, then)+, (else_clause)? 
let len = children.len(); let else_clause = if len % 2 == 1 { - let else_clause = build_from_prost(&children[len - 1])?; + let else_clause = build_child(&children[len - 1])?; if else_clause.return_type() != ret_type { bail!("Type mismatched between else and case."); } @@ -138,8 +135,8 @@ impl<'a> TryFrom<&'a ExprNode> for CaseExpression { for i in 0..len / 2 { let when_index = i * 2; let then_index = i * 2 + 1; - let when_expr = build_from_prost(&children[when_index])?; - let then_expr = build_from_prost(&children[then_index])?; + let when_expr = build_child(&children[when_index])?; + let then_expr = build_child(&children[then_index])?; if when_expr.return_type() != DataType::Boolean { bail!("Type mismatched between when clause and condition"); } @@ -158,106 +155,69 @@ impl<'a> TryFrom<&'a ExprNode> for CaseExpression { #[cfg(test)] mod tests { + use risingwave_common::row::Row; use risingwave_common::test_prelude::DataChunkTestExt; - use risingwave_common::types::Scalar; + use risingwave_common::types::ToOwnedDatum; + use risingwave_common::util::iter_util::ZipEqDebug; use super::*; use crate::expr::build_from_pretty; - async fn test_eval_row(expr: CaseExpression, row_inputs: Vec, expected: Vec>) { - for (i, row_input) in row_inputs.iter().enumerate() { - let row = OwnedRow::new(vec![Some(row_input.to_scalar_value())]); - let datum = expr.eval_row(&row).await.unwrap(); - let expected = expected[i].map(|f| f.into()); - assert_eq!(datum, expected) - } - } - #[tokio::test] async fn test_eval_searched_case() { - let ret_type = DataType::Float32; - // when x <= 2 then 3.1 else 4.1 + // when x then 1 else 2 let when_clauses = vec![WhenClause { - when: build_from_pretty("(less_than_or_equal:boolean (cast:float4 $0:int4) 2:float4)"), - then: build_from_pretty("3.1:float4"), + when: build_from_pretty("$0:boolean"), + then: build_from_pretty("1:int4"), }]; - let els = build_from_pretty("4.1:float4"); - let searched_case_expr = CaseExpression::new(ret_type, when_clauses, 
Some(els)); - let input = DataChunk::from_pretty( - "i - 1 - 2 - 3 - 4 - 5", - ); - let output = searched_case_expr.eval(&input).await.unwrap(); - assert_eq!(output.datum_at(0), Some(3.1f32.into())); - assert_eq!(output.datum_at(1), Some(3.1f32.into())); - assert_eq!(output.datum_at(2), Some(4.1f32.into())); - assert_eq!(output.datum_at(3), Some(4.1f32.into())); - assert_eq!(output.datum_at(4), Some(4.1f32.into())); + let els = build_from_pretty("2:int4"); + let case = CaseExpression::new(DataType::Int32, when_clauses, Some(els)); + let (input, expected) = DataChunk::from_pretty( + "B i + t 1 + f 2 + t 1 + t 1 + f 2", + ) + .split_column_at(1); + + // test eval + let output = case.eval(&input).await.unwrap(); + assert_eq!(&output, expected.column_at(0)); + + // test eval_row + for (row, expected) in input.rows().zip_eq_debug(expected.rows()) { + let result = case.eval_row(&row.to_owned_row()).await.unwrap(); + assert_eq!(result, expected.datum_at(0).to_owned_datum()); + } } #[tokio::test] async fn test_eval_without_else() { - let ret_type = DataType::Float32; - // when x <= 3 then 3.1 - let when_clauses = vec![WhenClause { - when: build_from_pretty("(less_than_or_equal:boolean (cast:float4 $0:int4) 3:float4)"), - then: build_from_pretty("3.1:float4"), - }]; - let searched_case_expr = CaseExpression::new(ret_type, when_clauses, None); - let input = DataChunk::from_pretty( - "i - 3 - 4 - 3 - 4", - ); - let output = searched_case_expr.eval(&input).await.unwrap(); - assert_eq!(output.datum_at(0), Some(3.1f32.into())); - assert_eq!(output.datum_at(1), None); - assert_eq!(output.datum_at(2), Some(3.1f32.into())); - assert_eq!(output.datum_at(3), None); - } - - #[tokio::test] - async fn test_eval_row_searched_case() { - let ret_type = DataType::Float32; - // when x <= 2 then 3.1 else 4.1 + // when x then 1 let when_clauses = vec![WhenClause { - when: build_from_pretty("(less_than_or_equal:boolean (cast:float4 $0:int4) 2:float4)"), - then: build_from_pretty("3.1:float4"), 
+ when: build_from_pretty("$0:boolean"), + then: build_from_pretty("1:int4"), }]; - let els = build_from_pretty("4.1:float4"); - let searched_case_expr = CaseExpression::new(ret_type, when_clauses, Some(els)); - - let row_inputs = vec![1, 2, 3, 4, 5]; - let expected = vec![ - Some(3.1f32), - Some(3.1f32), - Some(4.1f32), - Some(4.1f32), - Some(4.1f32), - ]; - - test_eval_row(searched_case_expr, row_inputs, expected).await; - } - - #[tokio::test] - async fn test_eval_row_without_else() { - let ret_type = DataType::Float32; - // when x <= 3 then 3.1 - let when_clauses = vec![WhenClause { - when: build_from_pretty("(less_than_or_equal:boolean (cast:float4 $0:int4) 3:float4)"), - then: build_from_pretty("3.1:float4"), - }]; - let searched_case_expr = CaseExpression::new(ret_type, when_clauses, None); - - let row_inputs = vec![2, 3, 4, 5]; - let expected = vec![Some(3.1f32), Some(3.1f32), None, None]; - - test_eval_row(searched_case_expr, row_inputs, expected).await; + let case = CaseExpression::new(DataType::Int32, when_clauses, None); + let (input, expected) = DataChunk::from_pretty( + "B i + t 1 + f . + t 1 + f .", + ) + .split_column_at(1); + + // test eval + let output = case.eval(&input).await.unwrap(); + assert_eq!(&output, expected.column_at(0)); + + // test eval_row + for (row, expected) in input.rows().zip_eq_debug(expected.rows()) { + let result = case.eval_row(&row.to_owned_row()).await.unwrap(); + assert_eq!(result, expected.datum_at(0).to_owned_datum()); + } } } diff --git a/src/expr/src/expr/expr_coalesce.rs b/src/expr/core/src/expr/expr_coalesce.rs similarity index 83% rename from src/expr/src/expr/expr_coalesce.rs rename to src/expr/core/src/expr/expr_coalesce.rs index df06ff2a2110b..71c7392c7ec37 100644 --- a/src/expr/src/expr/expr_coalesce.rs +++ b/src/expr/core/src/expr/expr_coalesce.rs @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::convert::TryFrom; use std::ops::BitAnd; use std::sync::Arc; -use risingwave_common::array::{ArrayRef, DataChunk, Vis, VisRef}; +use risingwave_common::array::{ArrayRef, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum}; use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::ExprNode; -use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression, Expression}; -use crate::{bail, ensure, ExprError, Result}; +use super::Build; +use crate::expr::{BoxedExpression, Expression}; +use crate::{bail, ensure, Result}; #[derive(Debug)] pub struct CoalesceExpression { @@ -38,26 +38,20 @@ impl Expression for CoalesceExpression { } async fn eval(&self, input: &DataChunk) -> Result { - let init_vis = input.vis(); + let init_vis = input.visibility(); let mut input = input.clone(); let len = input.capacity(); let mut selection: Vec> = vec![None; len]; let mut children_array = Vec::with_capacity(self.children.len()); for (child_idx, child) in self.children.iter().enumerate() { - let res = child.eval_checked(&input).await?; + let res = child.eval(&input).await?; let res_bitmap = res.null_bitmap(); - let orig_vis = input.vis(); - let res_bitmap_ref: VisRef<'_> = res_bitmap.into(); - orig_vis - .as_ref() - .bitand(res_bitmap_ref) - .iter_ones() - .for_each(|pos| { - selection[pos] = Some(child_idx); - }); - let res_vis: Vis = (!res_bitmap).into(); - let new_vis = orig_vis & res_vis; - input.set_vis(new_vis); + let orig_vis = input.visibility(); + for pos in orig_vis.bitand(res_bitmap).iter_ones() { + selection[pos] = Some(child_idx); + } + let new_vis = orig_vis & !res_bitmap; + input.set_visibility(new_vis); children_array.push(res); } let mut builder = self.return_type.create_array_builder(len); @@ -91,10 +85,11 @@ impl CoalesceExpression { } } -impl<'a> TryFrom<&'a ExprNode> for CoalesceExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for 
CoalesceExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { ensure!(prost.get_function_type().unwrap() == Type::Coalesce); let ret_type = DataType::from(prost.get_return_type().unwrap()); @@ -106,7 +101,7 @@ impl<'a> TryFrom<&'a ExprNode> for CoalesceExpression { .children .to_vec() .iter() - .map(expr_build_from_prost) + .map(build_child) .collect::>>()?; Ok(CoalesceExpression::new(ret_type, children)) } @@ -126,7 +121,7 @@ mod tests { use crate::expr::expr_coalesce::CoalesceExpression; use crate::expr::test_utils::make_input_ref; - use crate::expr::Expression; + use crate::expr::{Build, Expression}; pub fn make_coalesce_function(children: Vec, ret: TypeName) -> ExprNode { ExprNode { @@ -153,7 +148,7 @@ mod tests { . . .", ); - let nullif_expr = CoalesceExpression::try_from(&make_coalesce_function( + let nullif_expr = CoalesceExpression::build_for_test(&make_coalesce_function( vec![input_node1, input_node2, input_node3], TypeName::Int32, )) @@ -171,7 +166,7 @@ mod tests { let input_node2 = make_input_ref(1, TypeName::Int32); let input_node3 = make_input_ref(2, TypeName::Int32); - let nullif_expr = CoalesceExpression::try_from(&make_coalesce_function( + let nullif_expr = CoalesceExpression::build_for_test(&make_coalesce_function( vec![input_node1, input_node2, input_node3], TypeName::Int32, )) diff --git a/src/expr/src/expr/expr_field.rs b/src/expr/core/src/expr/expr_field.rs similarity index 89% rename from src/expr/src/expr/expr_field.rs rename to src/expr/core/src/expr/expr_field.rs index 3402335549225..a4101301308ed 100644 --- a/src/expr/src/expr/expr_field.rs +++ b/src/expr/core/src/expr/expr_field.rs @@ -12,18 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::convert::TryFrom; - use anyhow::anyhow; use risingwave_common::array::{ArrayImpl, ArrayRef, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::value_encoding::deserialize_datum; +use risingwave_common::util::value_encoding::DatumFromProtoExt; use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::ExprNode; -use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression, Expression}; -use crate::{bail, ensure, ExprError, Result}; +use super::Build; +use crate::expr::{BoxedExpression, Expression}; +use crate::{bail, ensure, Result}; /// `FieldExpression` access a field from a struct. #[derive(Debug)] @@ -40,7 +39,7 @@ impl Expression for FieldExpression { } async fn eval(&self, input: &DataChunk) -> Result { - let array = self.input.eval_checked(input).await?; + let array = self.input.eval(input).await?; if let ArrayImpl::Struct(struct_array) = array.as_ref() { Ok(struct_array.field_at(self.index).clone()) } else { @@ -70,10 +69,11 @@ impl FieldExpression { } } -impl<'a> TryFrom<&'a ExprNode> for FieldExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for FieldExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { ensure!(prost.get_function_type().unwrap() == Type::Field); let ret_type = DataType::from(prost.get_return_type().unwrap()); @@ -85,11 +85,11 @@ impl<'a> TryFrom<&'a ExprNode> for FieldExpression { // Field `func_call_node` have 2 child nodes, the first is Field `FuncCall` or // `InputRef`, the second is i32 `Literal`. 
let [first, second]: [_; 2] = children.try_into().unwrap(); - let input = expr_build_from_prost(&first)?; + let input = build_child(&first)?; let RexNode::Constant(value) = second.get_rex_node().unwrap() else { bail!("Expected Constant as 1st argument"); }; - let index = deserialize_datum(value.body.as_slice(), &DataType::Int32) + let index = Datum::from_protobuf(value, &DataType::Int32) .map_err(|e| anyhow!("Failed to deserialize i32, reason: {:?}", e))? .unwrap() .as_int32() @@ -108,13 +108,13 @@ mod tests { use crate::expr::expr_field::FieldExpression; use crate::expr::test_utils::{make_field_function, make_i32_literal, make_input_ref}; - use crate::expr::Expression; + use crate::expr::{Build, Expression}; #[tokio::test] async fn test_field_expr() { let input_node = make_input_ref(0, TypeName::Struct); let literal_node = make_i32_literal(0); - let field_expr = FieldExpression::try_from(&make_field_function( + let field_expr = FieldExpression::build_for_test(&make_field_function( vec![input_node, literal_node], TypeName::Int32, )) @@ -143,7 +143,7 @@ mod tests { vec![make_input_ref(0, TypeName::Struct), make_i32_literal(0)], TypeName::Int32, ); - let field_expr = FieldExpression::try_from(&make_field_function( + let field_expr = FieldExpression::build_for_test(&make_field_function( vec![field_node, make_i32_literal(1)], TypeName::Int32, )) diff --git a/src/expr/src/expr/expr_in.rs b/src/expr/core/src/expr/expr_in.rs similarity index 88% rename from src/expr/src/expr/expr_in.rs rename to src/expr/core/src/expr/expr_in.rs index cbe356bc1bbd6..cbc5cd244b528 100644 --- a/src/expr/src/expr/expr_in.rs +++ b/src/expr/core/src/expr/expr_in.rs @@ -25,8 +25,9 @@ use risingwave_common::{bail, ensure}; use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::ExprNode; -use crate::expr::{build_from_prost, BoxedExpression, Expression}; -use crate::{ExprError, Result}; +use super::Build; +use crate::expr::{BoxedExpression, Expression}; +use crate::Result; 
#[derive(Debug)] pub struct InExpression { @@ -74,9 +75,9 @@ impl Expression for InExpression { } async fn eval(&self, input: &DataChunk) -> Result { - let input_array = self.left.eval_checked(input).await?; + let input_array = self.left.eval(input).await?; let mut output_array = BoolArrayBuilder::new(input_array.len()); - for (data, vis) in input_array.iter().zip_eq_fast(input.vis().iter()) { + for (data, vis) in input_array.iter().zip_eq_fast(input.visibility().iter()) { if vis { let ret = self.exists(&data.to_owned_datum()); output_array.append(ret); @@ -94,10 +95,11 @@ impl Expression for InExpression { } } -impl<'a> TryFrom<&'a ExprNode> for InExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for InExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { ensure!(prost.get_function_type().unwrap() == Type::In); let ret_type = DataType::from(prost.get_return_type().unwrap()); @@ -106,13 +108,13 @@ impl<'a> TryFrom<&'a ExprNode> for InExpression { }; let children = &func_call_node.children; - let left_expr = build_from_prost(&children[0])?; + let left_expr = build_child(&children[0])?; let mut data = Vec::new(); // Used for const expression below to generate datum. // Frontend has made sure these can all be folded to constants. let data_chunk = DataChunk::new_dummy(1); for child in &children[1..] 
{ - let const_expr = build_from_prost(child)?; + let const_expr = build_child(child)?; let array = const_expr .eval(&data_chunk) .now_or_never() @@ -129,15 +131,15 @@ mod tests { use risingwave_common::array::DataChunk; use risingwave_common::row::OwnedRow; use risingwave_common::test_prelude::DataChunkTestExt; - use risingwave_common::types::{DataType, ScalarImpl}; - use risingwave_common::util::value_encoding::serialize_datum; + use risingwave_common::types::{DataType, Datum, ScalarImpl}; + use risingwave_common::util::value_encoding::DatumToProtoExt; use risingwave_pb::data::data_type::TypeName; - use risingwave_pb::data::{PbDataType, PbDatum}; + use risingwave_pb::data::PbDataType; use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::{ExprNode, FunctionCall}; use crate::expr::expr_in::InExpression; - use crate::expr::{Expression, InputRefExpression}; + use crate::expr::{Build, Expression, InputRefExpression}; #[test] fn test_in_expr() { @@ -156,9 +158,7 @@ mod tests { type_name: TypeName::Varchar as i32, ..Default::default() }), - rex_node: Some(RexNode::Constant(PbDatum { - body: serialize_datum(Some("ABC".into()).as_ref()), - })), + rex_node: Some(RexNode::Constant(Datum::Some("ABC".into()).to_protobuf())), }, ExprNode { function_type: Type::Unspecified as i32, @@ -166,9 +166,7 @@ mod tests { type_name: TypeName::Varchar as i32, ..Default::default() }), - rex_node: Some(RexNode::Constant(PbDatum { - body: serialize_datum(Some("def".into()).as_ref()), - })), + rex_node: Some(RexNode::Constant(Datum::Some("def".into()).to_protobuf())), }, ]; let mut in_children = vec![input_ref_expr_node]; @@ -184,7 +182,7 @@ mod tests { }), rex_node: Some(RexNode::FuncCall(call)), }; - assert!(InExpression::try_from(&p).is_ok()); + assert!(InExpression::build_for_test(&p).is_ok()); } #[tokio::test] @@ -233,7 +231,7 @@ mod tests { .eval(&data_chunks[i]) .await .unwrap() - .compact(vis.unwrap(), expected[i].len()); + .compact(vis, expected[i].len()); for 
(i, expect) in expected[i].iter().enumerate() { assert_eq!(res.datum_at(i), expect.map(ScalarImpl::Bool)); diff --git a/src/expr/src/expr/expr_input_ref.rs b/src/expr/core/src/expr/expr_input_ref.rs similarity index 84% rename from src/expr/src/expr/expr_input_ref.rs rename to src/expr/core/src/expr/expr_input_ref.rs index a511da81226dd..8e8ac4364ba64 100644 --- a/src/expr/src/expr/expr_input_ref.rs +++ b/src/expr/core/src/expr/expr_input_ref.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::convert::TryFrom; use std::ops::Index; use risingwave_common::array::{ArrayRef, DataChunk}; @@ -20,8 +19,9 @@ use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum}; use risingwave_pb::expr::ExprNode; +use super::{BoxedExpression, Build}; use crate::expr::Expression; -use crate::{ExprError, Result}; +use crate::Result; /// A reference to a column in input relation. #[derive(Debug, Clone)] @@ -51,6 +51,19 @@ impl InputRefExpression { InputRefExpression { return_type, idx } } + /// Create an [`InputRefExpression`] from a protobuf expression. + /// + /// Panics if the protobuf expression is not an input reference. 
+ pub fn from_prost(prost: &ExprNode) -> Self { + let ret_type = DataType::from(prost.get_return_type().unwrap()); + let input_col_idx = prost.get_rex_node().unwrap().as_input_ref().unwrap(); + + Self { + return_type: ret_type, + idx: *input_col_idx as _, + } + } + pub fn index(&self) -> usize { self.idx } @@ -60,17 +73,12 @@ impl InputRefExpression { } } -impl<'a> TryFrom<&'a ExprNode> for InputRefExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - let ret_type = DataType::from(prost.get_return_type().unwrap()); - let input_col_idx = prost.get_rex_node().unwrap().as_input_ref().unwrap(); - - Ok(Self { - return_type: ret_type, - idx: *input_col_idx as _, - }) +impl Build for InputRefExpression { + fn build( + prost: &ExprNode, + _build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { + Ok(Self::from_prost(prost)) } } diff --git a/src/expr/src/expr/expr_literal.rs b/src/expr/core/src/expr/expr_literal.rs similarity index 82% rename from src/expr/src/expr/expr_literal.rs rename to src/expr/core/src/expr/expr_literal.rs index 1f576b01ab9a5..54202ba732d3e 100644 --- a/src/expr/src/expr/expr_literal.rs +++ b/src/expr/core/src/expr/expr_literal.rs @@ -12,15 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::convert::TryFrom; - use risingwave_common::array::DataChunk; use risingwave_common::row::OwnedRow; use risingwave_common::types::{literal_type_match, DataType, Datum}; -use risingwave_common::util::value_encoding::deserialize_datum; +use risingwave_common::util::value_encoding::DatumFromProtoExt; use risingwave_pb::expr::ExprNode; -use super::ValueImpl; +use super::{Build, ValueImpl}; use crate::expr::Expression; use crate::{ExprError, Result}; @@ -67,17 +65,17 @@ impl LiteralExpression { } } -impl<'a> TryFrom<&'a ExprNode> for LiteralExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for LiteralExpression { + fn build( + prost: &ExprNode, + _build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { let ret_type = DataType::from(prost.get_return_type().unwrap()); let prost_value = prost.get_rex_node().unwrap().as_constant().unwrap(); - // TODO: We need to unify these - let value = deserialize_datum( - prost_value.get_body().as_slice(), + let value = Datum::from_protobuf( + prost_value, &DataType::from(prost.get_return_type().unwrap()), ) .map_err(|e| ExprError::Internal(e.into()))?; @@ -93,7 +91,7 @@ mod tests { use risingwave_common::array::{I32Array, StructValue}; use risingwave_common::types::test_utils::IntervalTestExt; use risingwave_common::types::{Decimal, Interval, IntoOrdered, Scalar, ScalarImpl}; - use risingwave_common::util::value_encoding::serialize_datum; + use risingwave_common::util::value_encoding::{serialize_datum, DatumToProtoExt}; use risingwave_pb::data::data_type::{IntervalType, TypeName}; use risingwave_pb::data::{PbDataType, PbDatum}; use risingwave_pb::expr::expr_node::RexNode::{self, Constant}; @@ -109,7 +107,7 @@ mod tests { Some(2.into()), None, ]); - let body = serialize_datum(Some(value.clone().to_scalar_value()).as_ref()); + let pb_datum = Some(value.clone().to_scalar_value()).to_protobuf(); let expr = ExprNode { function_type: Type::Unspecified as i32, return_type: 
Some(PbDataType { @@ -130,9 +128,9 @@ mod tests { ], ..Default::default() }), - rex_node: Some(Constant(PbDatum { body })), + rex_node: Some(Constant(pb_datum)), }; - let expr = LiteralExpression::try_from(&expr).unwrap(); + let expr = LiteralExpression::build_for_test(&expr).unwrap(); assert_eq!(value.to_scalar_value(), expr.literal().unwrap()); } @@ -142,62 +140,62 @@ mod tests { let t = TypeName::Boolean; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = 1i16; let t = TypeName::Int16; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = 1i32; let t = TypeName::Int32; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = 1i64; let t = TypeName::Int64; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = 1f32.into_ordered(); let t = TypeName::Float; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), 
expr.literal().unwrap()); let v = 1f64.into_ordered(); let t = TypeName::Double; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = None; let t = TypeName::Float; let bytes = serialize_datum(Datum::None); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v, expr.literal()); let v: Box = "varchar".into(); let t = TypeName::Varchar; let bytes = serialize_datum(Some(v.clone().to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = Decimal::from_i128_with_scale(3141, 3); let t = TypeName::Decimal; let bytes = serialize_datum(Some(v.to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!(v.to_scalar_value(), expr.literal().unwrap()); let v = 32i32; let t = TypeName::Interval; let bytes = serialize_datum(Some(Interval::from_month(v).to_scalar_value()).as_ref()); - let expr = LiteralExpression::try_from(&make_expression(bytes, t)).unwrap(); + let expr = LiteralExpression::build_for_test(&make_expression(bytes, t)).unwrap(); assert_eq!( Interval::from_month(v).to_scalar_value(), expr.literal().unwrap() diff --git a/src/expr/src/expr/expr_some_all.rs b/src/expr/core/src/expr/expr_some_all.rs similarity index 87% rename from src/expr/src/expr/expr_some_all.rs rename to src/expr/core/src/expr/expr_some_all.rs index 8b47df11343f3..8978824bef0bc 100644 --- 
a/src/expr/src/expr/expr_some_all.rs +++ b/src/expr/core/src/expr/expr_some_all.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use itertools::{multizip, Itertools}; +use itertools::Itertools; use risingwave_common::array::{Array, ArrayRef, BoolArray, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum, ListRef, Scalar, ScalarImpl, ScalarRefImpl}; @@ -24,8 +24,8 @@ use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::{ExprNode, FunctionCall}; use super::build::get_children_and_return_type; -use super::{build_from_prost, BoxedExpression, Expression}; -use crate::{ExprError, Result}; +use super::{BoxedExpression, Build, Expression}; +use crate::Result; #[derive(Debug)] pub struct SomeAllExpression { @@ -82,9 +82,8 @@ impl Expression for SomeAllExpression { } async fn eval(&self, data_chunk: &DataChunk) -> Result { - let arr_left = self.left_expr.eval_checked(data_chunk).await?; - let arr_right = self.right_expr.eval_checked(data_chunk).await?; - let bitmap = data_chunk.visibility(); + let arr_left = self.left_expr.eval(data_chunk).await?; + let arr_right = self.right_expr.eval(data_chunk).await?; let mut num_array = Vec::with_capacity(data_chunk.capacity()); let arr_right_inner = arr_right.as_list(); @@ -124,24 +123,21 @@ impl Expression for SomeAllExpression { } }; - match bitmap { - Some(bitmap) => { - for ((left, right), visible) in arr_left - .iter() - .zip_eq_fast(arr_right.iter()) - .zip_eq_fast(bitmap.iter()) - { - if !visible { - num_array.push(None); - continue; - } - unfolded_left_right(left, right, &mut num_array); - } + if data_chunk.is_compacted() { + for (left, right) in arr_left.iter().zip_eq_fast(arr_right.iter()) { + unfolded_left_right(left, right, &mut num_array); } - None => { - for (left, right) in multizip((arr_left.iter(), arr_right.iter())) { - unfolded_left_right(left, right, &mut num_array); + } else { + for ((left, right), visible) in arr_left + .iter() + 
.zip_eq_fast(arr_right.iter()) + .zip_eq_fast(data_chunk.visibility().iter()) + { + if !visible { + num_array.push(None); + continue; } + unfolded_left_right(left, right, &mut num_array); } } @@ -206,10 +202,11 @@ impl Expression for SomeAllExpression { } } -impl<'a> TryFrom<&'a ExprNode> for SomeAllExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for SomeAllExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { let outer_expr_type = prost.get_function_type().unwrap(); let (outer_children, outer_return_type) = get_children_and_return_type(prost)?; ensure!(matches!(outer_return_type, DataType::Boolean)); @@ -224,8 +221,8 @@ impl<'a> TryFrom<&'a ExprNode> for SomeAllExpression { (inner_children, inner_return_type) = get_children_and_return_type(&inner_children[0])?; } - let left_expr = build_from_prost(&inner_children[0])?; - let right_expr = build_from_prost(&inner_children[1])?; + let left_expr = build_child(&inner_children[0])?; + let right_expr = build_child(&inner_children[1])?; let DataType::List(right_expr_return_type) = right_expr.return_type() else { bail!("Expect Array Type"); @@ -258,7 +255,7 @@ impl<'a> TryFrom<&'a ExprNode> for SomeAllExpression { })), } } - build_from_prost(&root_expr_node)? + build_child(&root_expr_node)? 
}; Ok(SomeAllExpression::new( diff --git a/src/expr/src/expr/expr_udf.rs b/src/expr/core/src/expr/expr_udf.rs similarity index 78% rename from src/expr/src/expr/expr_udf.rs rename to src/expr/core/src/expr/expr_udf.rs index db069a03763bd..a7867f49ec392 100644 --- a/src/expr/src/expr/expr_udf.rs +++ b/src/expr/core/src/expr/expr_udf.rs @@ -16,9 +16,10 @@ use std::collections::HashMap; use std::convert::TryFrom; use std::sync::{Arc, LazyLock, Mutex, Weak}; -use arrow_schema::{Field, Fields, Schema, SchemaRef}; +use arrow_schema::{Field, Fields, Schema}; use await_tree::InstrumentAwait; -use risingwave_common::array::{ArrayImpl, ArrayRef, DataChunk}; +use cfg_or_panic::cfg_or_panic; +use risingwave_common::array::{ArrayRef, DataChunk}; use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum}; use risingwave_pb::expr::user_defined_function::PbExtra; @@ -27,16 +28,16 @@ use risingwave_udf::wasm::{InstantiatedComponent, WasmEngine}; use risingwave_udf::ArrowFlightUdfClient; use tracing::Instrument; -use super::{build_from_prost, BoxedExpression}; +use super::{BoxedExpression, Build}; use crate::expr::Expression; -use crate::{bail, ExprError, Result}; +use crate::{bail, Result}; #[derive(Debug)] pub struct UdfExpression { children: Vec, arg_types: Vec, return_type: DataType, - arg_schema: SchemaRef, + arg_schema: Arc, imp: UdfImpl, span: await_tree::Span, } @@ -67,19 +68,19 @@ impl std::fmt::Debug for UdfImpl { } } -#[cfg(not(madsim))] #[async_trait::async_trait] impl Expression for UdfExpression { fn return_type(&self) -> DataType { self.return_type.clone() } + #[cfg_or_panic(not(madsim))] async fn eval(&self, input: &DataChunk) -> Result { - let vis = input.vis().to_bitmap(); + let vis = input.visibility(); let mut columns = Vec::with_capacity(self.children.len()); for child in &self.children { - let array = child.eval_checked(input).await?; - columns.push(array.as_ref().try_into()?); + let array = child.eval(input).await?; + 
columns.push(array); } self.eval_inner(columns, vis).await } @@ -92,14 +93,8 @@ impl Expression for UdfExpression { } let arg_row = OwnedRow::new(columns); let chunk = DataChunk::from_rows(std::slice::from_ref(&arg_row), &self.arg_types); - let arg_columns = chunk - .columns() - .iter() - .map::, _>(|c| Ok(c.as_ref().try_into()?)) - .try_collect()?; - let output_array = self - .eval_inner(arg_columns, chunk.vis().to_bitmap()) - .await?; + let arg_columns = chunk.columns().to_vec(); + let output_array = self.eval_inner(arg_columns, chunk.visibility()).await?; Ok(output_array.to_datum()) } } @@ -107,13 +102,29 @@ impl Expression for UdfExpression { impl UdfExpression { async fn eval_inner( &self, - columns: Vec, - vis: risingwave_common::buffer::Bitmap, + columns: Vec, + vis: &risingwave_common::buffer::Bitmap, ) -> Result { - let opts = arrow_array::RecordBatchOptions::default().with_row_count(Some(vis.len())); - let input = - arrow_array::RecordBatch::try_new_with_options(self.arg_schema.clone(), columns, &opts) - .expect("failed to build record batch"); + let chunk = DataChunk::new(columns, vis.clone()); + let compacted_chunk = chunk.compact_cow(); + let compacted_columns: Vec = compacted_chunk + .columns() + .iter() + .map(|c| { + c.as_ref() + .try_into() + .expect("failed covert ArrayRef to arrow_array::ArrayRef") + }) + .collect(); + let opts = + arrow_array::RecordBatchOptions::default().with_row_count(Some(vis.count_ones())); + let input = arrow_array::RecordBatch::try_new_with_options( + self.arg_schema.clone(), + compacted_columns, + &opts, + ) + .expect("failed to build record batch"); + let output: arrow_array::RecordBatch = match &self.imp { UdfImpl::Wasm { component } => { component @@ -128,27 +139,31 @@ impl UdfExpression { .await? 
} }; - if output.num_rows() != vis.len() { + if output.num_rows() != vis.count_ones() { bail!( "UDF returned {} rows, but expected {}", output.num_rows(), vis.len(), ); } - let Some(arrow_array) = output.columns().get(0) else { + + let data_chunk = + DataChunk::try_from(&output).expect("failed to convert UDF output to DataChunk"); + let output = data_chunk.uncompact(vis.clone()); + + let Some(array) = output.columns().get(0) else { bail!("UDF returned no columns"); }; - let mut array = ArrayImpl::try_from(arrow_array)?; - array.set_bitmap(array.null_bitmap() & vis); - Ok(Arc::new(array)) + Ok(array.clone()) } } -#[cfg(not(madsim))] -impl<'a> TryFrom<&'a ExprNode> for UdfExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +#[cfg_or_panic(not(madsim))] +impl Build for UdfExpression { + fn build( + prost: &ExprNode, + build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { let return_type = DataType::from(prost.get_return_type().unwrap()); let udf = prost.get_rex_node().unwrap().as_udf().unwrap(); @@ -189,7 +204,7 @@ impl<'a> TryFrom<&'a ExprNode> for UdfExpression { }; Ok(Self { - children: udf.children.iter().map(build_from_prost).try_collect()?, + children: udf.children.iter().map(build_child).try_collect()?, arg_types: udf.arg_types.iter().map(|t| t.into()).collect(), return_type, arg_schema, diff --git a/src/expr/src/expr/expr_vnode.rs b/src/expr/core/src/expr/expr_vnode.rs similarity index 89% rename from src/expr/src/expr/expr_vnode.rs rename to src/expr/core/src/expr/expr_vnode.rs index 75b029a592d77..200c1a2f03fa9 100644 --- a/src/expr/src/expr/expr_vnode.rs +++ b/src/expr/core/src/expr/expr_vnode.rs @@ -21,9 +21,9 @@ use risingwave_common::types::{DataType, Datum}; use risingwave_pb::expr::expr_node::{RexNode, Type}; use risingwave_pb::expr::ExprNode; -use super::Expression; +use super::{BoxedExpression, Build, Expression}; use crate::expr::InputRefExpression; -use crate::{bail, ensure, ExprError, Result}; +use 
crate::{bail, ensure, Result}; #[derive(Debug)] pub struct VnodeExpression { @@ -36,10 +36,11 @@ impl VnodeExpression { } } -impl<'a> TryFrom<&'a ExprNode> for VnodeExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { +impl Build for VnodeExpression { + fn build( + prost: &ExprNode, + _build_child: impl Fn(&ExprNode) -> Result, + ) -> Result { ensure!(prost.get_function_type().unwrap() == Type::Vnode); ensure!(DataType::from(prost.get_return_type().unwrap()) == DataType::Int16); @@ -50,9 +51,9 @@ impl<'a> TryFrom<&'a ExprNode> for VnodeExpression { let dist_key_input_refs = func_call_node .get_children() .iter() - .map(InputRefExpression::try_from) - .map(|res| res.map(|input| input.index())) - .try_collect()?; + .map(InputRefExpression::from_prost) + .map(|input| input.index()) + .collect(); Ok(VnodeExpression::new(dist_key_input_refs)) } @@ -95,7 +96,7 @@ mod tests { use super::VnodeExpression; use crate::expr::test_utils::make_input_ref; - use crate::expr::Expression; + use crate::expr::{Build, Expression}; pub fn make_vnode_function(children: Vec) -> ExprNode { ExprNode { @@ -113,7 +114,7 @@ mod tests { let input_node1 = make_input_ref(0, TypeName::Int32); let input_node2 = make_input_ref(0, TypeName::Int64); let input_node3 = make_input_ref(0, TypeName::Varchar); - let vnode_expr = VnodeExpression::try_from(&make_vnode_function(vec![ + let vnode_expr = VnodeExpression::build_for_test(&make_vnode_function(vec![ input_node1, input_node2, input_node3, @@ -138,7 +139,7 @@ mod tests { let input_node1 = make_input_ref(0, TypeName::Int32); let input_node2 = make_input_ref(0, TypeName::Int64); let input_node3 = make_input_ref(0, TypeName::Varchar); - let vnode_expr = VnodeExpression::try_from(&make_vnode_function(vec![ + let vnode_expr = VnodeExpression::build_for_test(&make_vnode_function(vec![ input_node1, input_node2, input_node3, diff --git a/src/expr/src/expr/mod.rs b/src/expr/core/src/expr/mod.rs similarity index 50% rename 
from src/expr/src/expr/mod.rs rename to src/expr/core/src/expr/mod.rs index 60f2484bf010d..48a46f640bf7b 100644 --- a/src/expr/src/expr/mod.rs +++ b/src/expr/core/src/expr/mod.rs @@ -32,52 +32,33 @@ //! [`eval`]: Expression::eval // These modules define concrete expression structures. -mod expr_array_concat; -mod expr_array_to_string; +mod and_or; mod expr_array_transform; -mod expr_binary_nonnull; -mod expr_binary_nullable; mod expr_case; mod expr_coalesce; -mod expr_concat_ws; mod expr_field; mod expr_in; mod expr_input_ref; -mod expr_is_null; -mod expr_jsonb_access; mod expr_literal; -mod expr_nested_construct; -mod expr_proctime; -pub mod expr_regexp; -pub mod expr_regexp_count; mod expr_some_all; -mod expr_timestamp_to_char_const_tmpl; -mod expr_timestamptz_to_char_const_tmpl; -mod expr_to_date_const_tmpl; -mod expr_to_timestamp_const_tmpl; pub(crate) mod expr_udf; -mod expr_unary; mod expr_vnode; +pub(crate) mod wrapper; mod build; -pub(crate) mod template; -pub(crate) mod template_fast; pub mod test_utils; mod value; -use std::sync::Arc; - use futures_util::TryFutureExt; use risingwave_common::array::{ArrayRef, DataChunk}; -use risingwave_common::row::{OwnedRow, Row}; +use risingwave_common::row::OwnedRow; use risingwave_common::types::{DataType, Datum}; -use risingwave_pb::expr::PbExprNode; -use static_assertions::const_assert; pub use self::build::*; pub use self::expr_input_ref::InputRefExpression; pub use self::expr_literal::LiteralExpression; pub use self::value::{ValueImpl, ValueRef}; +pub use self::wrapper::*; pub use super::{ExprError, Result}; /// Interface of an expression. @@ -86,20 +67,11 @@ pub use super::{ExprError, Result}; /// should be implemented. Prefer calling and implementing `eval_v2` instead of `eval` if possible, /// to gain the performance benefit of scalar expression. #[async_trait::async_trait] +#[auto_impl::auto_impl(&, Box)] pub trait Expression: std::fmt::Debug + Sync + Send { /// Get the return data type. 
fn return_type(&self) -> DataType; - /// Eval the result with extra checks. - async fn eval_checked(&self, input: &DataChunk) -> Result { - let res = self.eval(input).await?; - - // TODO: Decide to use assert or debug_assert by benchmarks. - assert_eq!(res.len(), input.capacity()); - - Ok(res) - } - /// Evaluate the expression in vectorized execution. Returns an array. /// /// The default implementation calls `eval_v2` and always converts the result to an array. @@ -130,72 +102,111 @@ pub trait Expression: std::fmt::Debug + Sync + Send { fn eval_const(&self) -> Result { Err(ExprError::NotConstant) } +} + +/// An owned dynamically typed [`Expression`]. +pub type BoxedExpression = Box; +/// Extension trait for boxing expressions. +/// +/// This is not directly made into [`Expression`] trait because... +/// - an expression does not have to be `'static`, +/// - and for the ease of `auto_impl`. +#[easy_ext::ext(ExpressionBoxExt)] +impl E { /// Wrap the expression in a Box. - fn boxed(self) -> BoxedExpression - where - Self: Sized + Send + 'static, - { + pub fn boxed(self) -> BoxedExpression { Box::new(self) } } -/// Extension trait to convert the protobuf representation to a boxed [`Expression`], with a -/// concrete expression type. -#[easy_ext::ext(TryFromExprNodeBoxed)] -impl<'a, T> T +/// An type-safe wrapper that indicates the inner expression can be evaluated in a non-strict +/// manner, i.e., developers can directly call `eval_infallible` and `eval_row_infallible` without +/// checking the result. +/// +/// This is usually created by non-strict build functions like [`crate::expr::build_non_strict_from_prost`] +/// and [`crate::expr::build_func_non_strict`]. It can also be created directly by +/// [`NonStrictExpression::new_topmost`], where only the evaluation of the topmost level expression +/// node is non-strict and should be treated as a TODO. 
+/// +/// Compared to [`crate::expr::wrapper::non_strict::NonStrict`], this is more like an indicator +/// applied on the root of an expression tree, while the latter is a wrapper that can be applied on +/// each node of the tree and actually changes the behavior. As a result, [`NonStrictExpression`] +/// does not implement [`Expression`] trait and instead deals directly with developers. +#[derive(Debug)] +pub struct NonStrictExpression(E); + +impl NonStrictExpression where - T: TryFrom<&'a PbExprNode, Error = ExprError> + Expression + 'static, + E: Expression, { - /// Performs the conversion. - fn try_from_boxed(expr: &'a PbExprNode) -> Result { - T::try_from(expr).map(|e| e.boxed()) + /// Create a non-strict expression directly wrapping the given expression. + /// + /// Should only be used in tests as evaluation may panic. + pub fn for_test(inner: E) -> NonStrictExpression + where + E: 'static, + { + NonStrictExpression(inner.boxed()) } -} -impl dyn Expression { - pub async fn eval_infallible(&self, input: &DataChunk, on_err: impl Fn(ExprError)) -> ArrayRef { - const_assert!(!STRICT_MODE); - - if let Ok(array) = self.eval(input).await { - return array; - } - - // When eval failed, recompute in row-based execution - // and pad with NULL for each failed row. - let mut array_builder = self.return_type().create_array_builder(input.cardinality()); - for row in input.rows_with_holes() { - if let Some(row) = row { - let datum = self - .eval_row_infallible(&row.into_owned_row(), &on_err) - .await; - array_builder.append(&datum); - } else { - array_builder.append_null(); - } - } - Arc::new(array_builder.finish()) + /// Create a non-strict expression from the given expression, where only the evaluation of the + /// topmost level expression node is non-strict (which is subtly different from + /// [`crate::expr::build_non_strict_from_prost`] where every node is non-strict). + /// + /// This should be used as a TODO. 
+ pub fn new_topmost( + inner: E, + error_report: impl EvalErrorReport, + ) -> NonStrictExpression { + let inner = wrapper::non_strict::NonStrict::new(inner, error_report); + NonStrictExpression(inner) } - pub async fn eval_row_infallible(&self, input: &OwnedRow, on_err: impl Fn(ExprError)) -> Datum { - const_assert!(!STRICT_MODE); + /// Get the return data type. + pub fn return_type(&self) -> DataType { + self.0.return_type() + } - self.eval_row(input).await.unwrap_or_else(|err| { - on_err(err); - None - }) + /// Evaluate the expression in vectorized execution and assert it succeeds. Returns an array. + /// + /// Use with expressions built in non-strict mode. + pub async fn eval_infallible(&self, input: &DataChunk) -> ArrayRef { + self.0.eval(input).await.expect("evaluation failed") } -} -/// An owned dynamically typed [`Expression`]. -pub type BoxedExpression = Box; + /// Evaluate the expression in row-based execution and assert it succeeds. Returns a nullable + /// scalar. + /// + /// Use with expressions built in non-strict mode. + pub async fn eval_row_infallible(&self, input: &OwnedRow) -> Datum { + self.0.eval_row(input).await.expect("evaluation failed") + } -/// Controls the behavior when a compute error happens. -/// -/// - If set to `false`, `NULL` will be inserted. -/// - TODO: If set to `true`, The MV will be suspended and removed from further checkpoints. It can -/// still be used to serve outdated data without corruption. + /// Unwrap the inner expression. + pub fn into_inner(self) -> E { + self.0 + } + + /// Get a reference to the inner expression. + pub fn inner(&self) -> &E { + &self.0 + } +} + +/// An optional context that can be used in a function. /// -/// See also . -#[allow(dead_code)] -const STRICT_MODE: bool = false; +/// # Example +/// ```ignore +/// #[function("foo(int4) -> int8")] +/// fn foo(a: i32, ctx: &Context) -> i64 { +/// assert_eq!(ctx.arg_types[0], DataType::Int32); +/// assert_eq!(ctx.return_type, DataType::Int64); +/// // ... 
+/// } +/// ``` +#[derive(Debug)] +pub struct Context { + pub arg_types: Vec, + pub return_type: DataType, +} diff --git a/src/expr/src/expr/test_utils.rs b/src/expr/core/src/expr/test_utils.rs similarity index 93% rename from src/expr/src/expr/test_utils.rs rename to src/expr/core/src/expr/test_utils.rs index d276413c02678..56ebcdfddf784 100644 --- a/src/expr/src/expr/test_utils.rs +++ b/src/expr/core/src/expr/test_utils.rs @@ -18,9 +18,9 @@ use std::num::NonZeroUsize; use num_traits::CheckedSub; use risingwave_common::types::{DataType, Interval, ScalarImpl}; -use risingwave_common::util::value_encoding::serialize_datum; +use risingwave_common::util::value_encoding::DatumToProtoExt; use risingwave_pb::data::data_type::TypeName; -use risingwave_pb::data::{PbDataType, PbDatum}; +use risingwave_pb::data::PbDataType; use risingwave_pb::expr::expr_node::Type::Field; use risingwave_pb::expr::expr_node::{self, RexNode, Type}; use risingwave_pb::expr::{ExprNode, FunctionCall}; @@ -57,9 +57,9 @@ pub fn make_i32_literal(data: i32) -> ExprNode { type_name: TypeName::Int32 as i32, ..Default::default() }), - rex_node: Some(RexNode::Constant(PbDatum { - body: serialize_datum(Some(ScalarImpl::Int32(data)).as_ref()), - })), + rex_node: Some(RexNode::Constant( + Some(ScalarImpl::Int32(data)).to_protobuf(), + )), } } @@ -70,9 +70,9 @@ fn make_interval_literal(data: Interval) -> ExprNode { type_name: TypeName::Interval as i32, ..Default::default() }), - rex_node: Some(RexNode::Constant(PbDatum { - body: serialize_datum(Some(ScalarImpl::Interval(data)).as_ref()), - })), + rex_node: Some(RexNode::Constant( + Some(ScalarImpl::Interval(data)).to_protobuf(), + )), } } diff --git a/src/expr/src/expr/value.rs b/src/expr/core/src/expr/value.rs similarity index 71% rename from src/expr/src/expr/value.rs rename to src/expr/core/src/expr/value.rs index 4535aa31bcdf4..e9b95aaec0dfa 100644 --- a/src/expr/src/expr/value.rs +++ b/src/expr/core/src/expr/value.rs @@ -15,7 +15,7 @@ use 
either::Either; use risingwave_common::array::*; use risingwave_common::for_all_array_variants; -use risingwave_common::types::{Datum, Scalar}; +use risingwave_common::types::{Datum, DatumRef, Scalar, ToDatumRef}; /// The type-erased return value of an expression. /// @@ -26,6 +26,31 @@ pub enum ValueImpl { Scalar { value: Datum, capacity: usize }, } +impl From for ValueImpl { + fn from(value: ArrayRef) -> Self { + Self::Array(value) + } +} + +impl ValueImpl { + /// Number of scalars in this value. + #[inline] + #[expect(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.iter().len() + } + + /// Iterates over all scalars in this value. + pub fn iter(&self) -> impl ExactSizeIterator> + '_ { + match self { + Self::Array(array) => Either::Left(array.iter()), + Self::Scalar { value, capacity } => { + Either::Right(itertools::repeat_n(value.to_datum_ref(), *capacity)) + } + } + } +} + /// The generic reference type of [`ValueImpl`]. Used as the arguments of expressions. #[derive(Debug, Clone, Copy)] pub enum ValueRef<'a, A: Array> { @@ -37,13 +62,18 @@ pub enum ValueRef<'a, A: Array> { } impl<'a, A: Array> ValueRef<'a, A> { + /// Number of scalars in this value. + #[inline] + #[expect(clippy::len_without_is_empty)] + pub fn len(self) -> usize { + self.iter().len() + } + /// Iterates over all scalars in this value. 
- pub fn iter(self) -> impl Iterator>> + 'a { + pub fn iter(self) -> impl ExactSizeIterator>> + 'a { match self { Self::Array(array) => Either::Left(array.iter()), - Self::Scalar { value, capacity } => { - Either::Right(std::iter::repeat(value).take(capacity)) - } + Self::Scalar { value, capacity } => Either::Right(itertools::repeat_n(value, capacity)), } } } diff --git a/src/expr/core/src/expr/wrapper/checked.rs b/src/expr/core/src/expr/wrapper/checked.rs new file mode 100644 index 0000000000000..b3b1375c4fa82 --- /dev/null +++ b/src/expr/core/src/expr/wrapper/checked.rs @@ -0,0 +1,53 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use async_trait::async_trait; +use risingwave_common::array::{ArrayRef, DataChunk}; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, Datum}; + +use crate::error::Result; +use crate::expr::{Expression, ValueImpl}; + +/// A wrapper of [`Expression`] that does extra checks after evaluation. +#[derive(Debug)] +pub(crate) struct Checked(pub E); + +// TODO: avoid the overhead of extra boxing. 
+#[async_trait] +impl Expression for Checked { + fn return_type(&self) -> DataType { + self.0.return_type() + } + + async fn eval(&self, input: &DataChunk) -> Result { + let res = self.0.eval(input).await?; + assert_eq!(res.len(), input.capacity()); + Ok(res) + } + + async fn eval_v2(&self, input: &DataChunk) -> Result { + let res = self.0.eval_v2(input).await?; + assert_eq!(res.len(), input.capacity()); + Ok(res) + } + + async fn eval_row(&self, input: &OwnedRow) -> Result { + self.0.eval_row(input).await + } + + fn eval_const(&self) -> Result { + self.0.eval_const() + } +} diff --git a/src/expr/core/src/expr/wrapper/mod.rs b/src/expr/core/src/expr/wrapper/mod.rs new file mode 100644 index 0000000000000..16988a050ad8d --- /dev/null +++ b/src/expr/core/src/expr/wrapper/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub(crate) mod checked; +pub(crate) mod non_strict; + +pub use non_strict::{EvalErrorReport, LogReport}; diff --git a/src/expr/core/src/expr/wrapper/non_strict.rs b/src/expr/core/src/expr/wrapper/non_strict.rs new file mode 100644 index 0000000000000..782456023cdf7 --- /dev/null +++ b/src/expr/core/src/expr/wrapper/non_strict.rs @@ -0,0 +1,143 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use async_trait::async_trait; +use auto_impl::auto_impl; +use risingwave_common::array::{ArrayRef, DataChunk}; +use risingwave_common::row::{OwnedRow, Row}; +use risingwave_common::types::{DataType, Datum}; + +use crate::error::Result; +use crate::expr::{Expression, ValueImpl}; +use crate::ExprError; + +/// Report an error during evaluation. +#[auto_impl(&, Arc)] +pub trait EvalErrorReport: Clone + Send + Sync { + /// Perform the error reporting. + /// + /// Called when an error occurs during row-level evaluation of a non-strict expression, + /// that is, wrapped by [`NonStrict`]. + fn report(&self, error: ExprError); +} + +/// A dummy implementation that panics when called. +/// +/// Used as the type parameter for the expression builder when non-strict evaluation is not +/// required. +impl EvalErrorReport for ! { + fn report(&self, _error: ExprError) { + unreachable!() + } +} + +/// Log the error to report an error during evaluation. +#[derive(Clone)] +pub struct LogReport; + +impl EvalErrorReport for LogReport { + fn report(&self, error: ExprError) { + tracing::error!(%error, "failed to evaluate expression"); + } +} + +/// A wrapper of [`Expression`] that evaluates in a non-strict way. Basically... +/// - When an error occurs during chunk-level evaluation, recompute in row-based execution and pad +/// with NULL for each failed row. +/// - Report all error occurred during row-level evaluation to the [`EvalErrorReport`]. 
+pub(crate) struct NonStrict { + inner: E, + report: R, +} + +impl std::fmt::Debug for NonStrict +where + E: std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NonStrict") + .field("inner", &self.inner) + .field("report", &std::any::type_name::()) + .finish() + } +} + +impl NonStrict +where + E: Expression, + R: EvalErrorReport, +{ + pub fn new(inner: E, report: R) -> Self { + Self { inner, report } + } + + /// Evaluate expression in row-based execution with `eval_row_infallible`. + async fn eval_chunk_infallible_by_row(&self, input: &DataChunk) -> ArrayRef { + let mut array_builder = self.return_type().create_array_builder(input.capacity()); + for row in input.rows_with_holes() { + if let Some(row) = row { + let datum = self.eval_row_infallible(&row.into_owned_row()).await; // TODO: use `Row` trait + array_builder.append(&datum); + } else { + array_builder.append_null(); + } + } + array_builder.finish().into() + } + + /// Evaluate expression on a single row, report error and return NULL if failed. + async fn eval_row_infallible(&self, input: &OwnedRow) -> Datum { + match self.inner.eval_row(input).await { + Ok(datum) => datum, + Err(error) => { + self.report.report(error); + None // NULL + } + } + } +} + +// TODO: avoid the overhead of extra boxing. 
+#[async_trait] +impl Expression for NonStrict +where + E: Expression, + R: EvalErrorReport, +{ + fn return_type(&self) -> DataType { + self.inner.return_type() + } + + async fn eval(&self, input: &DataChunk) -> Result { + Ok(match self.inner.eval(input).await { + Ok(array) => array, + Err(_e) => self.eval_chunk_infallible_by_row(input).await, + }) + } + + async fn eval_v2(&self, input: &DataChunk) -> Result { + Ok(match self.inner.eval_v2(input).await { + Ok(value) => value, + Err(_e) => self.eval_chunk_infallible_by_row(input).await.into(), + }) + } + + async fn eval_row(&self, input: &OwnedRow) -> Result { + Ok(self.eval_row_infallible(input).await) + } + + fn eval_const(&self) -> Result { + self.inner.eval_const() // do not handle error + } +} diff --git a/src/expr/src/lib.rs b/src/expr/core/src/lib.rs similarity index 77% rename from src/expr/src/lib.rs rename to src/expr/core/src/lib.rs index ee4cea38e4bb5..b49c4ae161dfc 100644 --- a/src/expr/src/lib.rs +++ b/src/expr/core/src/lib.rs @@ -14,25 +14,25 @@ #![allow(non_snake_case)] // for `ctor` generated code #![feature(let_chains)] -#![feature(assert_matches)] #![feature(lint_reasons)] #![feature(iterator_try_collect)] -#![feature(exclusive_range_pattern)] #![feature(lazy_cell)] -#![feature(round_ties_even)] -#![feature(generators)] -#![feature(test)] +#![feature(coroutines)] #![feature(arc_unwrap_or_clone)] +#![feature(never_type)] -pub mod agg; +extern crate self as risingwave_expr; + +pub mod aggregate; +#[doc(hidden)] +pub mod codegen; mod error; pub mod expr; -pub mod function; +pub mod scalar; pub mod sig; pub mod table_function; -pub mod vector_op; pub mod window_function; -pub use error::{ExprError, Result}; -use risingwave_common::{bail, ensure}; +pub use error::{ContextUnavailable, ExprError, Result}; +pub use risingwave_common::{bail, ensure}; pub use risingwave_expr_macro::*; diff --git a/src/expr/src/vector_op/like.rs b/src/expr/core/src/scalar/like.rs similarity index 99% rename from 
src/expr/src/vector_op/like.rs rename to src/expr/core/src/scalar/like.rs index 13acc3aab53a7..32894accbcf2c 100644 --- a/src/expr/src/vector_op/like.rs +++ b/src/expr/core/src/scalar/like.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_expr_macro::function; +use risingwave_expr::function; fn like_impl(s: &str, p: &str) -> bool { let (mut px, mut sx) = (0, 0); diff --git a/src/storage/backup/cmd/src/lib.rs b/src/expr/core/src/scalar/mod.rs similarity index 97% rename from src/storage/backup/cmd/src/lib.rs rename to src/expr/core/src/scalar/mod.rs index b0b2c0a500fb9..483091270fa00 100644 --- a/src/storage/backup/cmd/src/lib.rs +++ b/src/expr/core/src/scalar/mod.rs @@ -11,3 +11,5 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + +pub mod like; diff --git a/src/expr/src/sig/cast.rs b/src/expr/core/src/sig/cast.rs similarity index 100% rename from src/expr/src/sig/cast.rs rename to src/expr/core/src/sig/cast.rs diff --git a/src/expr/core/src/sig/mod.rs b/src/expr/core/src/sig/mod.rs new file mode 100644 index 0000000000000..c2e71b585d49c --- /dev/null +++ b/src/expr/core/src/sig/mod.rs @@ -0,0 +1,419 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Metadata of expressions. 
+ +use std::collections::HashMap; +use std::fmt; +use std::sync::LazyLock; + +use itertools::Itertools; +use risingwave_common::types::DataType; +use risingwave_pb::expr::expr_node::PbType as ScalarFunctionType; +use risingwave_pb::expr::table_function::PbType as TableFunctionType; + +use crate::aggregate::{AggCall, AggKind as AggregateFunctionType, BoxedAggregateFunction}; +use crate::error::Result; +use crate::expr::BoxedExpression; +use crate::table_function::BoxedTableFunction; +use crate::ExprError; + +pub mod cast; + +/// The global registry of all function signatures. +pub static FUNCTION_REGISTRY: LazyLock = LazyLock::new(|| unsafe { + // SAFETY: this function is called after all `#[ctor]` functions are called. + let mut map = FunctionRegistry::default(); + tracing::info!("found {} functions", FUNCTION_REGISTRY_INIT.len()); + for sig in FUNCTION_REGISTRY_INIT.drain(..) { + map.insert(sig); + } + map +}); + +/// A set of function signatures. +#[derive(Default, Clone, Debug)] +pub struct FunctionRegistry(HashMap>); + +impl FunctionRegistry { + /// Inserts a function signature. + pub fn insert(&mut self, sig: FuncSign) { + self.0.entry(sig.name).or_default().push(sig) + } + + /// Returns a function signature with the same type, argument types and return type. + /// Deprecated functions are included. + pub fn get( + &self, + name: impl Into, + args: &[DataType], + ret: &DataType, + ) -> Option<&FuncSign> { + let v = self.0.get(&name.into())?; + v.iter().find(|d| d.match_args_ret(args, ret)) + } + + /// Returns all function signatures with the same type and number of arguments. + /// Deprecated functions are excluded. + pub fn get_with_arg_nums(&self, name: impl Into, nargs: usize) -> Vec<&FuncSign> { + match self.0.get(&name.into()) { + Some(v) => v + .iter() + .filter(|d| d.match_number_of_args(nargs) && !d.deprecated) + .collect(), + None => vec![], + } + } + + /// Returns a function signature with the given type, argument types, return type. 
+ /// + /// The `prefer_append_only` flag only works when both append-only and retractable version exist. + /// Otherwise, return the signature of the only version. + pub fn get_aggregate( + &self, + ty: AggregateFunctionType, + args: &[DataType], + ret: &DataType, + prefer_append_only: bool, + ) -> Option<&FuncSign> { + let v = self.0.get(&ty.into())?; + let mut iter = v.iter().filter(|d| d.match_args_ret(args, ret)); + if iter.clone().count() == 2 { + iter.find(|d| d.append_only == prefer_append_only) + } else { + iter.next() + } + } + + /// Returns the return type for the given function and arguments. + pub fn get_return_type( + &self, + name: impl Into, + args: &[DataType], + ) -> Result { + let name = name.into(); + let v = self + .0 + .get(&name) + .ok_or_else(|| ExprError::UnsupportedFunction(name.to_string()))?; + let sig = v + .iter() + .find(|d| d.match_args(args)) + .ok_or_else(|| ExprError::UnsupportedFunction(name.to_string()))?; + (sig.type_infer)(args) + } + + /// Returns an iterator of all function signatures. + pub fn iter(&self) -> impl Iterator { + self.0.values().flatten() + } + + /// Returns an iterator of all scalar functions. + pub fn iter_scalars(&self) -> impl Iterator { + self.iter().filter(|d| d.is_scalar()) + } + + /// Returns an iterator of all aggregate functions. + pub fn iter_aggregates(&self) -> impl Iterator { + self.iter().filter(|d| d.is_aggregate()) + } +} + +/// A function signature. +#[derive(Clone)] +pub struct FuncSign { + /// The name of the function. + pub name: FuncName, + + /// The argument types. + pub inputs_type: Vec, + + /// Whether the function is variadic. + pub variadic: bool, + + /// The return type. + pub ret_type: SigDataType, + + /// A function to build the expression. + pub build: FuncBuilder, + + /// A function to infer the return type from argument types. + pub type_infer: fn(args: &[DataType]) -> Result, + + /// Whether the function is deprecated and should not be used in the frontend. 
+ /// For backward compatibility, it is still available in the backend. + pub deprecated: bool, + + /// The state type of the aggregate function. + /// `None` means equal to the return type. + pub state_type: Option, + + /// Whether the aggregate function is append-only. + pub append_only: bool, +} + +impl fmt::Debug for FuncSign { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}({}{}) -> {}{}", + self.name.as_str_name().to_ascii_lowercase(), + self.inputs_type.iter().format(", "), + if self.variadic { + if self.inputs_type.is_empty() { + "..." + } else { + ", ..." + } + } else { + "" + }, + if self.name.is_table() { "setof " } else { "" }, + self.ret_type, + )?; + if self.append_only { + write!(f, " [append-only]")?; + } + if self.deprecated { + write!(f, " [deprecated]")?; + } + Ok(()) + } +} + +impl FuncSign { + /// Returns true if the argument types match the function signature. + pub fn match_args(&self, args: &[DataType]) -> bool { + if !self.match_number_of_args(args.len()) { + return false; + } + // allow `zip` as the length of `args` may be larger than `inputs_type` + #[allow(clippy::disallowed_methods)] + self.inputs_type + .iter() + .zip(args.iter()) + .all(|(matcher, arg)| matcher.matches(arg)) + } + + /// Returns true if the argument types match the function signature. + fn match_args_ret(&self, args: &[DataType], ret: &DataType) -> bool { + self.match_args(args) && self.ret_type.matches(ret) + } + + /// Returns true if the number of arguments matches the function signature. + fn match_number_of_args(&self, n: usize) -> bool { + if self.variadic { + n >= self.inputs_type.len() + } else { + n == self.inputs_type.len() + } + } + + /// Returns true if the function is a scalar function. + pub const fn is_scalar(&self) -> bool { + matches!(self.name, FuncName::Scalar(_)) + } + + /// Returns true if the function is a table function. 
+ pub const fn is_table_function(&self) -> bool { + matches!(self.name, FuncName::Table(_)) + } + + /// Returns true if the function is a aggregate function. + pub const fn is_aggregate(&self) -> bool { + matches!(self.name, FuncName::Aggregate(_)) + } + + /// Builds the scalar function. + pub fn build_scalar( + &self, + return_type: DataType, + children: Vec, + ) -> Result { + match self.build { + FuncBuilder::Scalar(f) => f(return_type, children), + _ => panic!("Expected a scalar function"), + } + } + + /// Builds the table function. + pub fn build_table( + &self, + return_type: DataType, + chunk_size: usize, + children: Vec, + ) -> Result { + match self.build { + FuncBuilder::Table(f) => f(return_type, chunk_size, children), + _ => panic!("Expected a table function"), + } + } + + /// Builds the aggregate function. + pub fn build_aggregate(&self, agg: &AggCall) -> Result { + match self.build { + FuncBuilder::Aggregate(f) => f(agg), + _ => panic!("Expected an aggregate function"), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum FuncName { + Scalar(ScalarFunctionType), + Table(TableFunctionType), + Aggregate(AggregateFunctionType), +} + +impl From for FuncName { + fn from(ty: ScalarFunctionType) -> Self { + Self::Scalar(ty) + } +} + +impl From for FuncName { + fn from(ty: TableFunctionType) -> Self { + Self::Table(ty) + } +} + +impl From for FuncName { + fn from(ty: AggregateFunctionType) -> Self { + Self::Aggregate(ty) + } +} + +impl fmt::Display for FuncName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str_name().to_ascii_lowercase()) + } +} + +impl FuncName { + /// Returns the name of the function in `UPPER_CASE` style. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Scalar(ty) => ty.as_str_name(), + Self::Table(ty) => ty.as_str_name(), + Self::Aggregate(ty) => ty.to_protobuf().as_str_name(), + } + } + + /// Returns true if the function is a table function. 
+ const fn is_table(&self) -> bool { + matches!(self, Self::Table(_)) + } + + pub fn as_scalar(&self) -> ScalarFunctionType { + match self { + Self::Scalar(ty) => *ty, + _ => panic!("Expected a scalar function"), + } + } + + pub fn as_aggregate(&self) -> AggregateFunctionType { + match self { + Self::Aggregate(ty) => *ty, + _ => panic!("Expected an aggregate function"), + } + } +} + +/// An extended data type that can be used to declare a function's argument or result type. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum SigDataType { + /// Exact data type + Exact(DataType), + /// Accepts any data type + Any, + /// Accepts any array data type + AnyArray, + /// Accepts any struct data type + AnyStruct, +} + +impl From for SigDataType { + fn from(dt: DataType) -> Self { + SigDataType::Exact(dt) + } +} + +impl std::fmt::Display for SigDataType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Exact(dt) => write!(f, "{}", dt), + Self::Any => write!(f, "any"), + Self::AnyArray => write!(f, "anyarray"), + Self::AnyStruct => write!(f, "anystruct"), + } + } +} + +impl SigDataType { + /// Returns true if the data type matches. + pub fn matches(&self, dt: &DataType) -> bool { + match self { + Self::Exact(ty) => ty == dt, + Self::Any => true, + Self::AnyArray => dt.is_array(), + Self::AnyStruct => dt.is_struct(), + } + } + + /// Returns the exact data type. + pub fn as_exact(&self) -> &DataType { + match self { + Self::Exact(ty) => ty, + t => panic!("expected data type, but got: {t}"), + } + } + + /// Returns true if the data type is exact. 
+ pub fn is_exact(&self) -> bool { + matches!(self, Self::Exact(_)) + } +} + +#[derive(Clone, Copy)] +pub enum FuncBuilder { + Scalar(fn(return_type: DataType, children: Vec) -> Result), + Table( + fn( + return_type: DataType, + chunk_size: usize, + children: Vec, + ) -> Result, + ), + Aggregate(fn(agg: &AggCall) -> Result), +} + +/// Register a function into global registry. +/// +/// # Safety +/// +/// This function must be called sequentially. +/// +/// It is designed to be used by `#[function]` macro. +/// Users SHOULD NOT call this function. +#[doc(hidden)] +pub unsafe fn _register(sig: FuncSign) { + FUNCTION_REGISTRY_INIT.push(sig) +} + +/// The global registry of function signatures on initialization. +/// +/// `#[function]` macro will generate a `#[ctor]` function to register the signature into this +/// vector. The calls are guaranteed to be sequential. The vector will be drained and moved into +/// `FUNCTION_REGISTRY` on the first access of `FUNCTION_REGISTRY`. +static mut FUNCTION_REGISTRY_INIT: Vec = Vec::new(); diff --git a/src/expr/core/src/table_function/empty.rs b/src/expr/core/src/table_function/empty.rs new file mode 100644 index 0000000000000..f9e4b1951988a --- /dev/null +++ b/src/expr/core/src/table_function/empty.rs @@ -0,0 +1,36 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +/// An empty table function that returns nothing. 
+pub fn empty(return_type: DataType) -> BoxedTableFunction { + Empty { return_type }.boxed() +} + +#[derive(Debug)] +struct Empty { + return_type: DataType, +} + +#[async_trait::async_trait] +impl TableFunction for Empty { + fn return_type(&self) -> DataType { + self.return_type.clone() + } + + async fn eval<'a>(&'a self, _input: &'a DataChunk) -> BoxStream<'a, Result> { + futures_util::stream::empty().boxed() + } +} diff --git a/src/expr/src/table_function/mod.rs b/src/expr/core/src/table_function/mod.rs similarity index 91% rename from src/expr/src/table_function/mod.rs rename to src/expr/core/src/table_function/mod.rs index bf89463adbbdd..2a3028a72e3b5 100644 --- a/src/expr/src/table_function/mod.rs +++ b/src/expr/core/src/table_function/mod.rs @@ -18,23 +18,19 @@ use futures_util::stream::BoxStream; use futures_util::StreamExt; use itertools::Itertools; use risingwave_common::array::{Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk}; -use risingwave_common::types::{DataType, DataTypeName, DatumRef}; +use risingwave_common::types::{DataType, DatumRef}; use risingwave_pb::expr::project_set_select_item::SelectItem; use risingwave_pb::expr::table_function::PbType; use risingwave_pb::expr::{PbProjectSetSelectItem, PbTableFunction}; use super::{ExprError, Result}; use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression}; -use crate::sig::FuncSigDebug; -mod generate_series; -mod generate_subscripts; -mod jsonb; -mod regexp_matches; +mod empty; mod repeat; -mod unnest; mod user_defined; +pub use self::empty::*; pub use self::repeat::*; use self::user_defined::*; @@ -49,7 +45,7 @@ pub trait TableFunction: std::fmt::Debug + Sync + Send { /// # Contract of the output /// /// The returned `DataChunk` contains exact two columns: - /// - The first column is an I32Array containing row indexes of input chunk. It should be + /// - The first column is an I32Array containing row indices of input chunk. It should be /// monotonically increasing. 
/// - The second column is the output values. The data type of the column is `return_type`. /// @@ -80,7 +76,7 @@ pub trait TableFunction: std::fmt::Debug + Sync + Send { /// (You don't need to understand this section to implement a `TableFunction`) /// /// The output of the `TableFunction` is different from the output of the `ProjectSet` executor. - /// `ProjectSet` executor uses the row indexes to stitch multiple table functions and produces + /// `ProjectSet` executor uses the row indices to stitch multiple table functions and produces /// `projected_row_id`. /// /// ## Example @@ -133,25 +129,18 @@ pub fn build( chunk_size: usize, children: Vec, ) -> Result { - let args = children - .iter() - .map(|t| t.return_type().into()) - .collect::>(); - let desc = crate::sig::table_function::FUNC_SIG_MAP - .get(func, &args) + let args = children.iter().map(|t| t.return_type()).collect_vec(); + let desc = crate::sig::FUNCTION_REGISTRY + .get(func, &args, &return_type) .ok_or_else(|| { ExprError::UnsupportedFunction(format!( - "{:?}", - FuncSigDebug { - func: func.as_str_name(), - inputs_type: &args, - ret_type: (&return_type).into(), - set_returning: true, - deprecated: false, - } + "{}({}) -> setof {}", + func.as_str_name().to_ascii_lowercase(), + args.iter().format(", "), + return_type, )) })?; - (desc.build)(return_type, chunk_size, children) + desc.build_table(return_type, chunk_size, children) } /// See also [`PbProjectSetSelectItem`] diff --git a/src/expr/src/table_function/repeat.rs b/src/expr/core/src/table_function/repeat.rs similarity index 100% rename from src/expr/src/table_function/repeat.rs rename to src/expr/core/src/table_function/repeat.rs diff --git a/src/expr/src/table_function/user_defined.rs b/src/expr/core/src/table_function/user_defined.rs similarity index 55% rename from src/expr/src/table_function/user_defined.rs rename to src/expr/core/src/table_function/user_defined.rs index cba425c54d5d9..be391ca224bc3 100644 --- 
a/src/expr/src/table_function/user_defined.rs +++ b/src/expr/core/src/table_function/user_defined.rs @@ -14,9 +14,11 @@ use std::sync::Arc; +use arrow_array::RecordBatch; use arrow_schema::{Field, Fields, Schema, SchemaRef}; +use cfg_or_panic::cfg_or_panic; use futures_util::stream; -use risingwave_common::array::DataChunk; +use risingwave_common::array::{DataChunk, I32Array}; use risingwave_common::bail; use risingwave_udf::ArrowFlightUdfClient; @@ -25,6 +27,7 @@ use super::*; #[derive(Debug)] pub struct UserDefinedTableFunction { children: Vec, + #[allow(dead_code)] arg_schema: SchemaRef, return_type: DataType, client: Arc, @@ -33,13 +36,13 @@ pub struct UserDefinedTableFunction { chunk_size: usize, } -#[cfg(not(madsim))] #[async_trait::async_trait] impl TableFunction for UserDefinedTableFunction { fn return_type(&self) -> DataType { self.return_type.clone() } + #[cfg_or_panic(not(madsim))] async fn eval<'a>(&'a self, input: &'a DataChunk) -> BoxStream<'a, Result> { self.eval_inner(input) } @@ -49,35 +52,89 @@ impl TableFunction for UserDefinedTableFunction { impl UserDefinedTableFunction { #[try_stream(boxed, ok = DataChunk, error = ExprError)] async fn eval_inner<'a>(&'a self, input: &'a DataChunk) { + // evaluate children expressions let mut columns = Vec::with_capacity(self.children.len()); for c in &self.children { - let val = c.eval_checked(input).await?.as_ref().try_into()?; + let val = c.eval(input).await?; columns.push(val); } + let direct_input = DataChunk::new(columns, input.visibility().clone()); - let opts = - arrow_array::RecordBatchOptions::default().with_row_count(Some(input.cardinality())); - let input = - arrow_array::RecordBatch::try_new_with_options(self.arg_schema.clone(), columns, &opts) - .expect("failed to build record batch"); + // compact the input chunk and record the row mapping + let visible_rows = direct_input.visibility().iter_ones().collect_vec(); + let compacted_input = direct_input.compact_cow(); + let arrow_input = 
RecordBatch::try_from(compacted_input.as_ref())?; + + // call UDTF #[for_await] for res in self .client - .call_stream(&self.identifier, stream::once(async { input })) + .call_stream(&self.identifier, stream::once(async { arrow_input })) .await? { let output = DataChunk::try_from(&res?)?; + self.check_output(&output)?; + + // we send the compacted input to UDF, so we need to map the row indices back to the + // original input + let origin_indices = output + .column_at(0) + .as_int32() + .raw_iter() + // we have checked all indices are non-negative + .map(|idx| visible_rows[idx as usize] as i32) + .collect::(); + + let output = DataChunk::new( + vec![origin_indices.into_ref(), output.column_at(1).clone()], + output.visibility().clone(), + ); yield output; } } + + /// Check if the output chunk is valid. + fn check_output(&self, output: &DataChunk) -> Result<()> { + if output.columns().len() != 2 { + bail!( + "UDF returned {} columns, but expected 2", + output.columns().len() + ); + } + if output.column_at(0).data_type() != DataType::Int32 { + bail!( + "UDF returned {:?} at column 0, but expected {:?}", + output.column_at(0).data_type(), + DataType::Int32, + ); + } + if output.column_at(0).as_int32().raw_iter().any(|i| i < 0) { + bail!("UDF returned negative row index"); + } + if !output + .column_at(1) + .data_type() + .equals_datatype(&self.return_type) + { + bail!( + "UDF returned {:?} at column 1, but expected {:?}", + output.column_at(1).data_type(), + &self.return_type, + ); + } + Ok(()) + } } -#[cfg(not(madsim))] +#[cfg_or_panic(not(madsim))] pub fn new_user_defined(prost: &PbTableFunction, chunk_size: usize) -> Result { let Some(udtf) = &prost.udtf else { bail!("expect UDTF"); }; + // connect to UDF service + let client = crate::expr::expr_udf::get_or_create_flight_client(&udtf.link)?; + let arg_schema = Arc::new(Schema::new( udtf.arg_types .iter() @@ -92,8 +149,6 @@ pub fn new_user_defined(prost: &PbTableFunction, chunk_size: usize) -> Result()?, )); - // 
connect to UDF service - let client = crate::expr::expr_udf::get_or_create_flight_client(&udtf.link)?; Ok(UserDefinedTableFunction { children: prost.args.iter().map(expr_build_from_prost).try_collect()?, @@ -105,23 +160,3 @@ pub fn new_user_defined(prost: &PbTableFunction, chunk_size: usize) -> Result DataType { - panic!("UDF is not supported in simulation yet"); - } - - async fn eval<'a>(&'a self, input: &'a DataChunk) -> BoxStream<'a, Result> { - panic!("UDF is not supported in simulation yet"); - } -} - -#[cfg(madsim)] -pub fn new_user_defined( - _prost: &PbTableFunction, - _chunk_size: usize, -) -> Result { - panic!("UDF is not supported in simulation yet"); -} diff --git a/src/expr/src/window_function/call.rs b/src/expr/core/src/window_function/call.rs similarity index 99% rename from src/expr/src/window_function/call.rs rename to src/expr/core/src/window_function/call.rs index 11586c6252bcc..a74beb672fd4f 100644 --- a/src/expr/src/window_function/call.rs +++ b/src/expr/core/src/window_function/call.rs @@ -21,7 +21,7 @@ use risingwave_pb::expr::window_frame::{PbBound, PbExclusion}; use risingwave_pb::expr::{PbWindowFrame, PbWindowFunction}; use super::WindowFuncKind; -use crate::agg::AggArgs; +use crate::aggregate::AggArgs; use crate::Result; #[derive(Debug, Clone)] diff --git a/src/expr/src/window_function/kind.rs b/src/expr/core/src/window_function/kind.rs similarity index 66% rename from src/expr/src/window_function/kind.rs rename to src/expr/core/src/window_function/kind.rs index 1b6af945ede63..e2790645a6917 100644 --- a/src/expr/src/window_function/kind.rs +++ b/src/expr/core/src/window_function/kind.rs @@ -15,7 +15,7 @@ use parse_display::{Display, FromStr}; use risingwave_common::bail; -use crate::agg::AggKind; +use crate::aggregate::AggKind; use crate::Result; /// Kind of window functions. 
@@ -42,18 +42,18 @@ impl WindowFuncKind { use risingwave_pb::expr::window_function::{PbGeneralType, PbType}; let kind = match window_function_type { - PbType::General(typ) => match PbGeneralType::from_i32(*typ) { - Some(PbGeneralType::Unspecified) => bail!("Unspecified window function type"), - Some(PbGeneralType::RowNumber) => Self::RowNumber, - Some(PbGeneralType::Rank) => Self::Rank, - Some(PbGeneralType::DenseRank) => Self::DenseRank, - Some(PbGeneralType::Lag) => Self::Lag, - Some(PbGeneralType::Lead) => Self::Lead, - None => bail!("no such window function type"), + PbType::General(typ) => match PbGeneralType::try_from(*typ) { + Ok(PbGeneralType::Unspecified) => bail!("Unspecified window function type"), + Ok(PbGeneralType::RowNumber) => Self::RowNumber, + Ok(PbGeneralType::Rank) => Self::Rank, + Ok(PbGeneralType::DenseRank) => Self::DenseRank, + Ok(PbGeneralType::Lag) => Self::Lag, + Ok(PbGeneralType::Lead) => Self::Lead, + Err(_) => bail!("no such window function type"), }, - PbType::Aggregate(agg_type) => match PbAggType::from_i32(*agg_type) { - Some(agg_type) => Self::Aggregate(AggKind::from_protobuf(agg_type)?), - None => bail!("no such aggregate function type"), + PbType::Aggregate(agg_type) => match PbAggType::try_from(*agg_type) { + Ok(agg_type) => Self::Aggregate(AggKind::from_protobuf(agg_type)?), + Err(_) => bail!("no such aggregate function type"), }, }; Ok(kind) diff --git a/src/expr/src/window_function/mod.rs b/src/expr/core/src/window_function/mod.rs similarity index 100% rename from src/expr/src/window_function/mod.rs rename to src/expr/core/src/window_function/mod.rs diff --git a/src/expr/src/window_function/state/aggregate.rs b/src/expr/core/src/window_function/state/aggregate.rs similarity index 96% rename from src/expr/src/window_function/state/aggregate.rs rename to src/expr/core/src/window_function/state/aggregate.rs index 15b01fabc2711..7deee85693ef2 100644 --- a/src/expr/src/window_function/state/aggregate.rs +++ 
b/src/expr/core/src/window_function/state/aggregate.rs @@ -14,7 +14,7 @@ use std::collections::BTreeSet; -use futures::FutureExt; +use futures_util::FutureExt; use risingwave_common::array::{DataChunk, StreamChunk}; use risingwave_common::estimate_size::{EstimateSize, KvSize}; use risingwave_common::types::{DataType, Datum}; @@ -24,8 +24,8 @@ use smallvec::SmallVec; use super::buffer::WindowBuffer; use super::{StateEvictHint, StateKey, StatePos, WindowState}; -use crate::agg::{build as builg_agg, AggArgs, AggCall, BoxedAggregateFunction}; -use crate::function::window::{WindowFuncCall, WindowFuncKind}; +use crate::aggregate::{build_append_only, AggArgs, AggCall, BoxedAggregateFunction}; +use crate::window_function::{WindowFuncCall, WindowFuncKind}; use crate::Result; pub struct AggregateState { @@ -86,7 +86,7 @@ impl WindowState for AggregateState { fn curr_output(&self) -> Result { let wrapper = AggregatorWrapper { - agg: builg_agg(&self.agg_call)?, + agg: build_append_only(&self.agg_call)?, arg_data_types: &self.arg_data_types, }; wrapper.aggregate(self.buffer.curr_window_values().map(SmallVec::as_slice)) diff --git a/src/expr/src/window_function/state/buffer.rs b/src/expr/core/src/window_function/state/buffer.rs similarity index 99% rename from src/expr/src/window_function/state/buffer.rs rename to src/expr/core/src/window_function/state/buffer.rs index 97f68b18375b2..a375c7bfec225 100644 --- a/src/expr/src/window_function/state/buffer.rs +++ b/src/expr/core/src/window_function/state/buffer.rs @@ -17,7 +17,7 @@ use std::ops::Range; use either::Either; -use crate::function::window::{Frame, FrameBounds, FrameExclusion}; +use crate::window_function::{Frame, FrameBounds, FrameExclusion}; struct Entry { key: K, @@ -238,7 +238,7 @@ mod tests { use itertools::Itertools; use super::*; - use crate::function::window::{Frame, FrameBound}; + use crate::window_function::{Frame, FrameBound}; #[test] fn test_rows_frame_unbounded_preceding_to_current_row() { diff --git 
a/src/expr/src/window_function/state/mod.rs b/src/expr/core/src/window_function/state/mod.rs similarity index 89% rename from src/expr/src/window_function/state/mod.rs rename to src/expr/core/src/window_function/state/mod.rs index becf633107df6..971fb97f66cdc 100644 --- a/src/expr/src/window_function/state/mod.rs +++ b/src/expr/core/src/window_function/state/mod.rs @@ -14,15 +14,14 @@ use std::collections::BTreeSet; +use itertools::Itertools; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::OwnedRow; use risingwave_common::types::{Datum, DefaultOrdered}; use risingwave_common::util::memcmp_encoding::MemcmpEncoded; use smallvec::SmallVec; -use super::WindowFuncCall; -use crate::function::window::WindowFuncKind; -use crate::sig::FuncSigDebug; +use super::{WindowFuncCall, WindowFuncKind}; use crate::{ExprError, Result}; mod buffer; @@ -117,18 +116,11 @@ pub fn create_window_state(call: &WindowFuncCall) -> Result Box::new(row_number::RowNumberState::new(call)), Aggregate(_) => Box::new(aggregate::AggregateState::new(call)?), kind => { - let args = (call.args.arg_types().iter()) - .map(|t| t.into()) - .collect::>(); return Err(ExprError::UnsupportedFunction(format!( - "{:?}", - FuncSigDebug { - func: kind, - inputs_type: &args, - ret_type: call.return_type.clone().into(), - set_returning: false, - deprecated: false, - } + "{}({}) -> {}", + kind, + call.args.arg_types().iter().format(", "), + &call.return_type, ))); } }) diff --git a/src/expr/src/window_function/state/row_number.rs b/src/expr/core/src/window_function/state/row_number.rs similarity index 96% rename from src/expr/src/window_function/state/row_number.rs rename to src/expr/core/src/window_function/state/row_number.rs index 01b713cbc1196..fd485292c9382 100644 --- a/src/expr/src/window_function/state/row_number.rs +++ b/src/expr/core/src/window_function/state/row_number.rs @@ -18,7 +18,7 @@ use risingwave_common::types::Datum; use smallvec::SmallVec; use super::{StateEvictHint, 
StateKey, StatePos, WindowState}; -use crate::function::window::WindowFuncCall; +use crate::window_function::WindowFuncCall; use crate::Result; #[derive(EstimateSize)] @@ -82,8 +82,8 @@ mod tests { use risingwave_common::types::DataType; use super::*; - use crate::agg::AggArgs; - use crate::function::window::{Frame, FrameBound, WindowFuncKind}; + use crate::aggregate::AggArgs; + use crate::window_function::{Frame, FrameBound, WindowFuncKind}; fn create_state_key(pk: i64) -> StateKey { StateKey { diff --git a/src/expr/src/window_function/states.rs b/src/expr/core/src/window_function/states.rs similarity index 100% rename from src/expr/src/window_function/states.rs rename to src/expr/core/src/window_function/states.rs diff --git a/src/expr/impl/Cargo.toml b/src/expr/impl/Cargo.toml new file mode 100644 index 0000000000000..81cd685c4dc27 --- /dev/null +++ b/src/expr/impl/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "risingwave_expr_impl" +description = "RisingWave function implementations." 
+version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +keywords = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[package.metadata.cargo-machete] +ignored = ["workspace-hack", "ctor"] + +[package.metadata.cargo-udeps.ignore] +normal = ["workspace-hack", "ctor"] + +[dependencies] +aho-corasick = "1" +anyhow = "1" +async-trait = "0.1" +auto_enums = "0.8" +chrono = { version = "0.4", default-features = false, features = [ + "clock", + "std", +] } +fancy-regex = "0.11" +futures-async-stream = { workspace = true } +futures-util = "0.3" +hex = "0.4" +itertools = "0.11" +md5 = "0.7" +num-traits = "0.2" +regex = "1" +risingwave_common = { workspace = true } +risingwave_expr = { workspace = true } +risingwave_pb = { workspace = true } +rust_decimal = { version = "1", features = ["db-postgres", "maths"] } +self_cell = "1.0.1" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +sha1 = "0.10" +sha2 = "0.10" +thiserror = "1" +tokio = { version = "0.2", package = "madsim-tokio", features = ["time"] } +tracing = "0.1" + +[target.'cfg(not(madsim))'.dependencies] +workspace-hack = { path = "../../workspace-hack" } + +[dev-dependencies] +criterion = { workspace = true } +expect-test = "1" +tokio = { version = "0.2", package = "madsim-tokio", features = [ + "rt", + "macros", +] } + +[[bench]] +name = "expr" +harness = false + +[lints] +workspace = true diff --git a/src/expr/README.md b/src/expr/impl/README.md similarity index 100% rename from src/expr/README.md rename to src/expr/impl/README.md diff --git a/src/expr/benches/expr.rs b/src/expr/impl/benches/expr.rs similarity index 75% rename from src/expr/benches/expr.rs rename to src/expr/impl/benches/expr.rs index f3bac5eac107d..1e84d8d8e4825 100644 --- a/src/expr/benches/expr.rs +++ b/src/expr/impl/benches/expr.rs @@ -19,15 +19,16 @@ 
// `zip_eq` is a source of poor performance. #![allow(clippy::disallowed_methods)] +risingwave_expr_impl::enable!(); + use criterion::async_executor::FuturesExecutor; use criterion::{criterion_group, criterion_main, Criterion}; use risingwave_common::array::*; use risingwave_common::types::test_utils::IntervalTestExt; use risingwave_common::types::*; -use risingwave_expr::agg::{build as build_agg, AggArgs, AggCall}; +use risingwave_expr::aggregate::{build_append_only, AggArgs, AggCall, AggKind}; use risingwave_expr::expr::*; -use risingwave_expr::sig::agg::agg_func_sigs; -use risingwave_expr::sig::func::func_sigs; +use risingwave_expr::sig::FUNCTION_REGISTRY; use risingwave_expr::ExprError; use risingwave_pb::expr::expr_node::PbType; @@ -88,20 +89,42 @@ fn bench_expr(c: &mut Criterion) { .into_ref(), // 16: extract field for date Utf8Array::from_iter_display( - ["DAY", "MONTH", "YEAR", "DOW", "DOY"] - .into_iter() - .cycle() - .take(CHUNK_SIZE) - .map(Some), + [ + "DAY", + "MONTH", + "YEAR", + "DOW", + "DOY", + "MILLENNIUM", + "CENTURY", + "DECADE", + "ISOYEAR", + "QUARTER", + "WEEK", + "ISODOW", + "EPOCH", + "JULIAN", + ] + .into_iter() + .cycle() + .take(CHUNK_SIZE) + .map(Some), ) .into_ref(), // 17: extract field for time Utf8Array::from_iter_display( - ["HOUR", "MINUTE", "SECOND"] - .into_iter() - .cycle() - .take(CHUNK_SIZE) - .map(Some), + [ + "Hour", + "Minute", + "Second", + "Millisecond", + "Microsecond", + "Epoch", + ] + .into_iter() + .cycle() + .take(CHUNK_SIZE) + .map(Some), ) .into_ref(), // 18: extract field for timestamptz @@ -151,6 +174,38 @@ fn bench_expr(c: &mut Criterion) { (1..=CHUNK_SIZE).map(|i| JsonbVal::from(serde_json::Value::Number(i.into()))), ) .into_ref(), + // 27: int256 array + Int256Array::from_iter((1..=CHUNK_SIZE).map(|_| Int256::from(1))).into_ref(), + // 28: extract field for interval + Utf8Array::from_iter_display( + [ + "Millennium", + "Century", + "Decade", + "Year", + "Month", + "Day", + "Hour", + "Minute", + "Second", + 
"Millisecond", + "Microsecond", + "Epoch", + ] + .into_iter() + .cycle() + .take(CHUNK_SIZE) + .map(Some), + ) + .into_ref(), + // 29: timestamp string for to_timestamp + Utf8Array::from_iter_display( + [Some("2021/04/01 00:00:00")] + .into_iter() + .cycle() + .take(CHUNK_SIZE), + ) + .into_ref(), ], CHUNK_SIZE, )); @@ -171,11 +226,12 @@ fn bench_expr(c: &mut Criterion) { InputRefExpression::new(DataType::Varchar, 12), InputRefExpression::new(DataType::Bytea, 13), InputRefExpression::new(DataType::Jsonb, 26), + InputRefExpression::new(DataType::Int256, 27), ]; - let input_index_for_type = |ty: DataType| { + let input_index_for_type = |ty: &DataType| { inputrefs .iter() - .find(|r| r.return_type() == ty) + .find(|r| &r.return_type() == ty) .unwrap_or_else(|| panic!("expression not found for {ty:?}")) .index() }; @@ -185,6 +241,7 @@ fn bench_expr(c: &mut Criterion) { const EXTRACT_FIELD_TIME: usize = 17; const EXTRACT_FIELD_TIMESTAMP: usize = 16; const EXTRACT_FIELD_TIMESTAMPTZ: usize = 18; + const EXTRACT_FIELD_INTERVAL: usize = 28; const BOOL_STRING: usize = 19; const NUMBER_STRING: usize = 12; const DATE_STRING: usize = 20; @@ -192,6 +249,7 @@ fn bench_expr(c: &mut Criterion) { const TIMESTAMP_STRING: usize = 22; const TIMESTAMPTZ_STRING: usize = 23; const INTERVAL_STRING: usize = 24; + const TIMESTAMP_FORMATTED_STRING: usize = 29; c.bench_function("inputref", |bencher| { let inputref = inputrefs[0].clone().boxed(); @@ -208,41 +266,58 @@ fn bench_expr(c: &mut Criterion) { c.bench_function("extract(constant)", |bencher| { let extract = build_from_pretty(format!( "(extract:decimal HOUR:varchar ${}:timestamp)", - input_index_for_type(DataType::Timestamp) + input_index_for_type(&DataType::Timestamp) )); bencher .to_async(FuturesExecutor) .iter(|| extract.eval(&input)) }); - let sigs = func_sigs(); - let sigs = sigs.sorted_by_cached_key(|sig| format!("{sig:?}")); + let sigs = FUNCTION_REGISTRY + .iter_scalars() + .sorted_by_cached_key(|sig| format!("{sig:?}")); 'sig: 
for sig in sigs { - if sig - .inputs_type - .iter() - .any(|t| matches!(t, DataTypeName::Struct | DataTypeName::List)) + if (sig.inputs_type.iter()) + .chain([&sig.ret_type]) + .any(|t| !t.is_exact()) { // TODO: support struct and list println!("todo: {sig:?}"); continue; } + if [ + "date_trunc(varchar, timestamptz) -> timestamptz", + "to_timestamp1(varchar, varchar) -> timestamptz", + "to_char(timestamptz, varchar) -> varchar", + ] + .contains(&format!("{sig:?}").as_str()) + { + println!("ignore: {sig:?}"); + continue; + } + + fn string_literal(s: &str) -> BoxedExpression { + LiteralExpression::new(DataType::Varchar, Some(s.into())).boxed() + } let mut children = vec![]; for (i, t) in sig.inputs_type.iter().enumerate() { - use DataTypeName::*; - let idx = match (sig.func, i) { - (PbType::ToChar, 1) => { - children.push( - LiteralExpression::new( - DataType::Varchar, - Some("YYYY/MM/DD HH:MM:SS".into()), - ) - .boxed(), - ); + use DataType::*; + let idx = match (sig.name.as_scalar(), i) { + (PbType::ToTimestamp1, 0) => TIMESTAMP_FORMATTED_STRING, + (PbType::ToChar | PbType::ToTimestamp1, 1) => { + children.push(string_literal("YYYY/MM/DD HH:MM:SS")); + continue; + } + (PbType::ToChar | PbType::ToTimestamp1, 2) => { + children.push(string_literal("Australia/Sydney")); continue; } - (PbType::Cast, 0) if *t == DataTypeName::Varchar => match sig.ret_type { + (PbType::IsJson, 1) => { + children.push(string_literal("VALUE")); + continue; + } + (PbType::Cast, 0) if t.as_exact() == &Varchar => match sig.ret_type.as_exact() { Boolean => BOOL_STRING, Int16 | Int32 | Int64 | Float32 | Float64 | Decimal => NUMBER_STRING, Date => DATE_STRING, @@ -259,35 +334,54 @@ fn bench_expr(c: &mut Criterion) { (PbType::AtTimeZone, 1) => TIMEZONE, (PbType::DateTrunc, 0) => TIME_FIELD, (PbType::DateTrunc, 2) => TIMEZONE, - (PbType::Extract, 0) => match sig.inputs_type[1] { + (PbType::Extract, 0) => match sig.inputs_type[1].as_exact() { Date => EXTRACT_FIELD_DATE, Time => EXTRACT_FIELD_TIME, 
Timestamp => EXTRACT_FIELD_TIMESTAMP, Timestamptz => EXTRACT_FIELD_TIMESTAMPTZ, + Interval => EXTRACT_FIELD_INTERVAL, t => panic!("unexpected type: {t:?}"), }, - _ => input_index_for_type((*t).into()), + _ => input_index_for_type(t.as_exact()), }; - children.push(InputRefExpression::new(DataType::from(*t), idx).boxed()); + children.push(InputRefExpression::new(t.as_exact().clone(), idx).boxed()); } - let expr = build_func(sig.func, sig.ret_type.into(), children).unwrap(); + let expr = build_func( + sig.name.as_scalar(), + sig.ret_type.as_exact().clone(), + children, + ) + .unwrap(); c.bench_function(&format!("{sig:?}"), |bencher| { bencher.to_async(FuturesExecutor).iter(|| expr.eval(&input)) }); } - for sig in agg_func_sigs() { - if sig.inputs_type.len() != 1 { + let sigs = FUNCTION_REGISTRY + .iter_aggregates() + .sorted_by_cached_key(|sig| format!("{sig:?}")); + for sig in sigs { + if matches!( + sig.name.as_aggregate(), + AggKind::PercentileDisc | AggKind::PercentileCont + ) || (sig.inputs_type.iter()) + .chain([&sig.ret_type]) + .any(|t| !t.is_exact()) + { println!("todo: {sig:?}"); continue; } - let agg = match build_agg(&AggCall { - kind: sig.func, - args: AggArgs::Unary( - sig.inputs_type[0].into(), - input_index_for_type(sig.inputs_type[0].into()), - ), - return_type: sig.ret_type.into(), + let agg = match build_append_only(&AggCall { + kind: sig.name.as_aggregate(), + args: match sig.inputs_type.as_slice() { + [] => AggArgs::None, + [t] => AggArgs::Unary(t.as_exact().clone(), input_index_for_type(t.as_exact())), + _ => { + println!("todo: {sig:?}"); + continue; + } + }, + return_type: sig.ret_type.as_exact().clone(), column_orders: vec![], filter: None, distinct: false, diff --git a/src/expr/src/agg/approx_count_distinct/append_only.rs b/src/expr/impl/src/aggregate/approx_count_distinct/append_only.rs similarity index 97% rename from src/expr/src/agg/approx_count_distinct/append_only.rs rename to 
src/expr/impl/src/aggregate/approx_count_distinct/append_only.rs index d4a349147d31f..fc8a0caed2c9f 100644 --- a/src/expr/src/agg/approx_count_distinct/append_only.rs +++ b/src/expr/impl/src/aggregate/approx_count_distinct/append_only.rs @@ -14,9 +14,9 @@ use risingwave_common::bail; use risingwave_common::estimate_size::EstimateSize; +use risingwave_expr::Result; use super::Bucket; -use crate::Result; #[derive(Clone, Copy, Default, Debug, EstimateSize)] pub struct AppendOnlyBucket(pub u8); diff --git a/src/expr/src/agg/approx_count_distinct/mod.rs b/src/expr/impl/src/aggregate/approx_count_distinct/mod.rs similarity index 77% rename from src/expr/src/agg/approx_count_distinct/mod.rs rename to src/expr/impl/src/aggregate/approx_count_distinct/mod.rs index 98eb203015a82..d5b9d715b7317 100644 --- a/src/expr/src/agg/approx_count_distinct/mod.rs +++ b/src/expr/impl/src/aggregate/approx_count_distinct/mod.rs @@ -15,7 +15,6 @@ use std::collections::hash_map::DefaultHasher; use std::fmt::Debug; use std::hash::{Hash, Hasher}; -use std::marker::PhantomData; use std::ops::Range; use risingwave_common::array::{Op, StreamChunk}; @@ -23,12 +22,11 @@ use risingwave_common::bail; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::Row; use risingwave_common::types::*; -use risingwave_expr_macro::build_aggregate; +use risingwave_expr::aggregate::{AggCall, AggStateDyn, AggregateFunction, AggregateState}; +use risingwave_expr::{build_aggregate, ExprError, Result}; use self::append_only::AppendOnlyBucket; use self::updatable::UpdatableBucket; -use super::{AggCall, AggStateDyn, AggregateFunction, AggregateState}; -use crate::Result; mod append_only; mod updatable; @@ -43,19 +41,21 @@ const LOG_COUNT_BITS: u8 = 6; const BIAS_CORRECTION: f64 = 0.7213 / (1. + (1.079 / NUM_OF_REGISTERS as f64)); /// Count the approximate number of unique non-null values. 
-#[build_aggregate("approx_count_distinct(*) -> int64")] -fn build(_agg: &AggCall) -> Result> { - Ok(Box::new(ApproxCountDistinct:: { - _mark: PhantomData, - })) +#[build_aggregate("approx_count_distinct(*) -> int8", state = "int8")] +fn build_updatable(_agg: &AggCall) -> Result> { + Ok(Box::new(UpdatableApproxCountDistinct)) } -struct ApproxCountDistinct { - _mark: PhantomData, +/// Count the approximate number of unique non-null values. +#[build_aggregate("approx_count_distinct(*) -> int8", state = "int8[]", append_only)] +fn build_append_only(_agg: &AggCall) -> Result> { + Ok(Box::new(AppendOnlyApproxCountDistinct)) } +struct UpdatableApproxCountDistinct; + #[async_trait::async_trait] -impl AggregateFunction for ApproxCountDistinct { +impl AggregateFunction for UpdatableApproxCountDistinct { fn return_type(&self) -> DataType { DataType::Int64 } @@ -95,6 +95,115 @@ impl AggregateFunction for ApproxCountDistinct { let state = state.downcast_ref::(); Ok(Some(state.calculate_result().into())) } + + fn encode_state(&self, state: &AggregateState) -> Result { + let state = state.downcast_ref::(); + // FIXME: store state of updatable registers properly + Ok(Some(ScalarImpl::Int64(state.calculate_result()))) + } + + fn decode_state(&self, datum: Datum) -> Result { + // FIXME: restore state of updatable registers properly + let Some(ScalarImpl::Int64(initial_count)) = datum else { + return Err(ExprError::InvalidState("expect int8".into())); + }; + Ok(AggregateState::Any(Box::new(UpdatableRegisters { + initial_count, + ..UpdatableRegisters::default() + }))) + } +} + +struct AppendOnlyApproxCountDistinct; + +#[async_trait::async_trait] +impl AggregateFunction for AppendOnlyApproxCountDistinct { + fn return_type(&self) -> DataType { + DataType::Int64 + } + + fn create_state(&self) -> AggregateState { + AggregateState::Any(Box::::default()) + } + + async fn update(&self, state: &mut AggregateState, input: &StreamChunk) -> Result<()> { + let state = state.downcast_mut::(); + 
for (op, row) in input.rows() { + let retract = matches!(op, Op::Delete | Op::UpdateDelete); + if let Some(scalar) = row.datum_at(0) { + state.update(scalar, retract)?; + } + } + Ok(()) + } + + async fn update_range( + &self, + state: &mut AggregateState, + input: &StreamChunk, + range: Range, + ) -> Result<()> { + let state = state.downcast_mut::(); + for (op, row) in input.rows_in(range) { + let retract = matches!(op, Op::Delete | Op::UpdateDelete); + if let Some(scalar) = row.datum_at(0) { + state.update(scalar, retract)?; + } + } + Ok(()) + } + + async fn get_result(&self, state: &AggregateState) -> Result { + let state = state.downcast_ref::(); + Ok(Some(state.calculate_result().into())) + } + + fn encode_state(&self, state: &AggregateState) -> Result { + let reg = state.downcast_ref::(); + + let buckets = ®.registers[..]; + let result_len = (buckets.len() * LOG_COUNT_BITS as usize - 1) / (i64::BITS as usize) + 1; + let mut result = vec![0u64; result_len]; + for (i, bucket_val) in buckets.iter().enumerate() { + let (start_idx, begin_bit, post_end_bit) = pos_in_serialized(i); + result[start_idx] |= (buckets[i].0 as u64) << begin_bit; + if post_end_bit > i64::BITS { + result[start_idx + 1] |= (bucket_val.0 as u64) >> (i64::BITS - begin_bit as u32); + } + } + Ok(Some(ScalarImpl::List(ListValue::new( + result + .into_iter() + .map(|x| Some(ScalarImpl::Int64(x as i64))) + .collect(), + )))) + } + + fn decode_state(&self, datum: Datum) -> Result { + let scalar = datum.unwrap(); + let list = scalar.as_list().values(); + let bucket_num = list.len() * i64::BITS as usize / LOG_COUNT_BITS as usize; + let registers = (0..bucket_num) + .map(|i| { + let (start_idx, begin_bit, post_end_bit) = pos_in_serialized(i); + let val = *list[start_idx].as_ref().unwrap().as_int64(); + let v = if post_end_bit <= i64::BITS { + (val as u64) << (i64::BITS - post_end_bit) + >> (i64::BITS - LOG_COUNT_BITS as u32) + } else { + ((val as u64) >> begin_bit) + + (((*list[start_idx + 
1].as_ref().unwrap().as_int64() as u64) + & ((1 << (post_end_bit - i64::BITS)) - 1)) + << (i64::BITS - begin_bit as u32)) + }; + AppendOnlyBucket(v as u8) + }) + .collect(); + Ok(AggregateState::Any(Box::new(AppendOnlyRegisters { + registers, + initial_count: 0, + }))) + } } /// Approximates the count of non-null rows using a modified version of the `HyperLogLog` algorithm. @@ -215,75 +324,6 @@ impl EstimateSize for Registers { } } -/// Serialize the state into a scalar. -impl From for ScalarImpl { - fn from(reg: AppendOnlyRegisters) -> Self { - let buckets = ®.registers[..]; - let result_len = (buckets.len() * LOG_COUNT_BITS as usize - 1) / (i64::BITS as usize) + 1; - let mut result = vec![0u64; result_len]; - for (i, bucket_val) in buckets.iter().enumerate() { - let (start_idx, begin_bit, post_end_bit) = pos_in_serialized(i); - result[start_idx] |= (buckets[i].0 as u64) << begin_bit; - if post_end_bit > i64::BITS { - result[start_idx + 1] |= (bucket_val.0 as u64) >> (i64::BITS - begin_bit as u32); - } - } - ScalarImpl::List(ListValue::new( - result - .into_iter() - .map(|x| Some(ScalarImpl::Int64(x as i64))) - .collect(), - )) - } -} - -/// Deserialize the state from a scalar. 
-impl From for AppendOnlyRegisters { - fn from(state: ScalarImpl) -> Self { - let list = state.as_list().values(); - let bucket_num = list.len() * i64::BITS as usize / LOG_COUNT_BITS as usize; - let registers = (0..bucket_num) - .map(|i| { - let (start_idx, begin_bit, post_end_bit) = pos_in_serialized(i); - let val = *list[start_idx].as_ref().unwrap().as_int64(); - let v = if post_end_bit <= i64::BITS { - (val as u64) << (i64::BITS - post_end_bit) - >> (i64::BITS - LOG_COUNT_BITS as u32) - } else { - ((val as u64) >> begin_bit) - + (((*list[start_idx + 1].as_ref().unwrap().as_int64() as u64) - & ((1 << (post_end_bit - i64::BITS)) - 1)) - << (i64::BITS - begin_bit as u32)) - }; - AppendOnlyBucket(v as u8) - }) - .collect(); - Self { - registers, - initial_count: 0, - } - } -} - -/// Serialize the state into a scalar. -impl From for ScalarImpl { - fn from(reg: UpdatableRegisters) -> Self { - // FIXME: store state of updatable registers properly - ScalarImpl::Int64(reg.calculate_result()) - } -} - -/// Deserialize the state from a scalar. 
-impl From for UpdatableRegisters { - fn from(state: ScalarImpl) -> Self { - // FIXME: restore state of updatable registers properly - Self { - initial_count: state.into_int64(), - ..Self::default() - } - } -} - fn pos_in_serialized(bucket_idx: usize) -> (usize, usize, u32) { // rust compiler will optimize for us let start_idx = bucket_idx * LOG_COUNT_BITS as usize / i64::BITS as usize; @@ -296,12 +336,11 @@ fn pos_in_serialized(bucket_idx: usize) -> (usize, usize, u32) { mod tests { use futures_util::FutureExt; use risingwave_common::array::{Array, DataChunk, I32Array, StreamChunk}; - - use crate::agg::AggCall; + use risingwave_expr::aggregate::{build_append_only, AggCall}; #[test] fn test() { - let approx_count_distinct = crate::agg::build(&AggCall::from_pretty( + let approx_count_distinct = build_append_only(&AggCall::from_pretty( "(approx_count_distinct:int8 $0:int4)", )) .unwrap(); diff --git a/src/expr/src/agg/approx_count_distinct/updatable.rs b/src/expr/impl/src/aggregate/approx_count_distinct/updatable.rs similarity index 100% rename from src/expr/src/agg/approx_count_distinct/updatable.rs rename to src/expr/impl/src/aggregate/approx_count_distinct/updatable.rs diff --git a/src/expr/src/agg/array_agg.rs b/src/expr/impl/src/aggregate/array_agg.rs similarity index 79% rename from src/expr/src/agg/array_agg.rs rename to src/expr/impl/src/aggregate/array_agg.rs index 5e22b68dba70d..963d56ed08621 100644 --- a/src/expr/src/agg/array_agg.rs +++ b/src/expr/impl/src/aggregate/array_agg.rs @@ -13,13 +13,13 @@ // limitations under the License. 
use risingwave_common::array::ListValue; -use risingwave_common::types::{Datum, ScalarRef}; -use risingwave_expr_macro::aggregate; +use risingwave_common::types::{Datum, ScalarRefImpl, ToOwnedDatum}; +use risingwave_expr::aggregate; -#[aggregate("array_agg(*) -> list")] -fn array_agg<'a>(state: Option, value: Option>) -> ListValue { +#[aggregate("array_agg(any) -> anyarray")] +fn array_agg(state: Option, value: Option>) -> ListValue { let mut state: Vec = state.unwrap_or_default().into(); - state.push(value.map(|v| v.to_owned_scalar().into())); + state.push(value.to_owned_datum()); state.into() } @@ -27,9 +27,8 @@ fn array_agg<'a>(state: Option, value: Option>) -> mod tests { use risingwave_common::array::{ListValue, StreamChunk}; use risingwave_common::test_prelude::StreamChunkTestExt; - - use crate::agg::AggCall; - use crate::Result; + use risingwave_expr::aggregate::{build_append_only, AggCall}; + use risingwave_expr::Result; #[tokio::test] async fn test_array_agg_basic() -> Result<()> { @@ -39,7 +38,7 @@ mod tests { + 456 + 789", ); - let array_agg = crate::agg::build(&AggCall::from_pretty("(array_agg:int4[] $0:int4)"))?; + let array_agg = build_append_only(&AggCall::from_pretty("(array_agg:int4[] $0:int4)"))?; let mut state = array_agg.create_state(); array_agg.update(&mut state, &chunk).await?; let actual = array_agg.get_result(&state).await?; @@ -52,7 +51,7 @@ mod tests { #[tokio::test] async fn test_array_agg_empty() -> Result<()> { - let array_agg = crate::agg::build(&AggCall::from_pretty("(array_agg:int4[] $0:int4)"))?; + let array_agg = build_append_only(&AggCall::from_pretty("(array_agg:int4[] $0:int4)"))?; let mut state = array_agg.create_state(); assert_eq!(array_agg.get_result(&state).await?, None); diff --git a/src/expr/impl/src/aggregate/bit_and.rs b/src/expr/impl/src/aggregate/bit_and.rs new file mode 100644 index 0000000000000..879f81704b14a --- /dev/null +++ b/src/expr/impl/src/aggregate/bit_and.rs @@ -0,0 +1,191 @@ +// Copyright 2023 RisingWave 
Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::marker::PhantomData; +use std::ops::BitAnd; + +use risingwave_common::types::{ListRef, ListValue, ScalarImpl}; +use risingwave_expr::aggregate; + +/// Computes the bitwise AND of all non-null input values. +/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (a int2, b int4, c int8); +/// +/// query III +/// select bit_and(a), bit_and(b), bit_and(c) from t; +/// ---- +/// NULL NULL NULL +/// +/// statement ok +/// insert into t values +/// (6, 6, 6), +/// (3, 3, 3), +/// (null, null, null); +/// +/// query III +/// select bit_and(a), bit_and(b), bit_and(c) from t; +/// ---- +/// 2 2 2 +/// +/// statement ok +/// drop table t; +/// ``` +// XXX: state = "ref" is required so that +// for the first non-null value, the state is set to that value. +#[aggregate("bit_and(*int) -> auto", state = "ref")] +fn bit_and_append_only(state: T, input: T) -> T +where + T: BitAnd, +{ + state.bitand(input) +} + +/// Computes the bitwise AND of all non-null input values. 
+/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (a int2, b int4, c int8); +/// +/// statement ok +/// create materialized view mv as +/// select bit_and(a) a, bit_and(b) b, bit_and(c) c from t; +/// +/// query III +/// select * from mv; +/// ---- +/// NULL NULL NULL +/// +/// statement ok +/// insert into t values +/// (6, 6, 6), +/// (3, 3, 3), +/// (null, null, null); +/// +/// query III +/// select * from mv; +/// ---- +/// 2 2 2 +/// +/// statement ok +/// delete from t where a = 3; +/// +/// query III +/// select * from mv; +/// ---- +/// 6 6 6 +/// +/// statement ok +/// drop materialized view mv; +/// +/// statement ok +/// drop table t; +/// ``` +#[derive(Debug, Default, Clone)] +struct BitAndUpdatable { + _phantom: PhantomData, +} + +#[aggregate("bit_and(int2) -> int2", state = "int8[]", generic = "i16")] +#[aggregate("bit_and(int4) -> int4", state = "int8[]", generic = "i32")] +#[aggregate("bit_and(int8) -> int8", state = "int8[]", generic = "i64")] +impl BitAndUpdatable { + // state is the number of 0s for each bit. 
+ + fn create_state(&self) -> ListValue { + ListValue::new(vec![Some(ScalarImpl::Int64(0)); T::BITS]) + } + + fn accumulate(&self, mut state: ListValue, input: T) -> ListValue { + for i in 0..T::BITS { + if !input.get_bit(i) { + let Some(ScalarImpl::Int64(count)) = &mut state[i] else { + panic!("invalid state"); + }; + *count += 1; + } + } + state + } + + fn retract(&self, mut state: ListValue, input: T) -> ListValue { + for i in 0..T::BITS { + if !input.get_bit(i) { + let Some(ScalarImpl::Int64(count)) = &mut state[i] else { + panic!("invalid state"); + }; + *count -= 1; + } + } + state + } + + fn finalize(&self, state: ListRef<'_>) -> T { + let mut result = T::default(); + for i in 0..T::BITS { + let count = state.get(i).unwrap().unwrap().into_int64(); + if count == 0 { + result.set_bit(i); + } + } + result + } +} + +pub trait Bits: Default { + const BITS: usize; + fn get_bit(&self, i: usize) -> bool; + fn set_bit(&mut self, i: usize); +} + +impl Bits for i16 { + const BITS: usize = 16; + + fn get_bit(&self, i: usize) -> bool { + (*self >> i) & 1 == 1 + } + + fn set_bit(&mut self, i: usize) { + *self |= 1 << i; + } +} + +impl Bits for i32 { + const BITS: usize = 32; + + fn get_bit(&self, i: usize) -> bool { + (*self >> i) & 1 == 1 + } + + fn set_bit(&mut self, i: usize) { + *self |= 1 << i; + } +} + +impl Bits for i64 { + const BITS: usize = 64; + + fn get_bit(&self, i: usize) -> bool { + (*self >> i) & 1 == 1 + } + + fn set_bit(&mut self, i: usize) { + *self |= 1 << i; + } +} diff --git a/src/expr/impl/src/aggregate/bit_or.rs b/src/expr/impl/src/aggregate/bit_or.rs new file mode 100644 index 0000000000000..1bf205f335e8b --- /dev/null +++ b/src/expr/impl/src/aggregate/bit_or.rs @@ -0,0 +1,149 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::marker::PhantomData; +use std::ops::BitOr; + +use risingwave_common::types::{ListRef, ListValue, ScalarImpl}; +use risingwave_expr::aggregate; + +use super::bit_and::Bits; + +/// Computes the bitwise OR of all non-null input values. +/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (a int2, b int4, c int8); +/// +/// query III +/// select bit_or(a), bit_or(b), bit_or(c) from t; +/// ---- +/// NULL NULL NULL +/// +/// statement ok +/// insert into t values +/// (1, 1, 1), +/// (2, 2, 2), +/// (null, null, null); +/// +/// query III +/// select bit_or(a), bit_or(b), bit_or(c) from t; +/// ---- +/// 3 3 3 +/// +/// statement ok +/// drop table t; +/// ``` +#[aggregate("bit_or(*int) -> auto")] +fn bit_or_append_only(state: T, input: T) -> T +where + T: BitOr, +{ + state.bitor(input) +} + +/// Computes the bitwise OR of all non-null input values. 
+/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (a int2, b int4, c int8); +/// +/// statement ok +/// create materialized view mv as +/// select bit_or(a) a, bit_or(b) b, bit_or(c) c from t; +/// +/// query III +/// select * from mv; +/// ---- +/// NULL NULL NULL +/// +/// statement ok +/// insert into t values +/// (6, 6, 6), +/// (3, 3, 3), +/// (null, null, null); +/// +/// query III +/// select * from mv; +/// ---- +/// 7 7 7 +/// +/// statement ok +/// delete from t where a = 3; +/// +/// query III +/// select * from mv; +/// ---- +/// 6 6 6 +/// +/// statement ok +/// drop materialized view mv; +/// +/// statement ok +/// drop table t; +/// ``` +#[derive(Debug, Default, Clone)] +struct BitOrUpdatable { + _phantom: PhantomData, +} + +#[aggregate("bit_or(int2) -> int2", state = "int8[]", generic = "i16")] +#[aggregate("bit_or(int4) -> int4", state = "int8[]", generic = "i32")] +#[aggregate("bit_or(int8) -> int8", state = "int8[]", generic = "i64")] +impl BitOrUpdatable { + // state is the number of 1s for each bit. 
+ + fn create_state(&self) -> ListValue { + ListValue::new(vec![Some(ScalarImpl::Int64(0)); T::BITS]) + } + + fn accumulate(&self, mut state: ListValue, input: T) -> ListValue { + for i in 0..T::BITS { + if input.get_bit(i) { + let Some(ScalarImpl::Int64(count)) = &mut state[i] else { + panic!("invalid state"); + }; + *count += 1; + } + } + state + } + + fn retract(&self, mut state: ListValue, input: T) -> ListValue { + for i in 0..T::BITS { + if input.get_bit(i) { + let Some(ScalarImpl::Int64(count)) = &mut state[i] else { + panic!("invalid state"); + }; + *count -= 1; + } + } + state + } + + fn finalize(&self, state: ListRef<'_>) -> T { + let mut result = T::default(); + for i in 0..T::BITS { + let count = state.get(i).unwrap().unwrap().into_int64(); + if count != 0 { + result.set_bit(i); + } + } + result + } +} diff --git a/src/expr/impl/src/aggregate/bit_xor.rs b/src/expr/impl/src/aggregate/bit_xor.rs new file mode 100644 index 0000000000000..d86098105daf7 --- /dev/null +++ b/src/expr/impl/src/aggregate/bit_xor.rs @@ -0,0 +1,52 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::ops::BitXor; + +use risingwave_expr::aggregate; + +/// Computes the bitwise XOR of all non-null input values. 
+/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (a int2, b int4, c int8); +/// +/// query III +/// select bit_xor(a), bit_xor(b), bit_xor(c) from t; +/// ---- +/// NULL NULL NULL +/// +/// statement ok +/// insert into t values +/// (3, 3, 3), +/// (6, 6, 6), +/// (null, null, null); +/// +/// query III +/// select bit_xor(a), bit_xor(b), bit_xor(c) from t; +/// ---- +/// 5 5 5 +/// +/// statement ok +/// drop table t; +/// ``` +#[aggregate("bit_xor(*int) -> auto")] +fn bit_xor(state: T, input: T, _retract: bool) -> T +where + T: BitXor, +{ + state.bitxor(input) +} diff --git a/src/expr/impl/src/aggregate/bool_and.rs b/src/expr/impl/src/aggregate/bool_and.rs new file mode 100644 index 0000000000000..0560a56e56773 --- /dev/null +++ b/src/expr/impl/src/aggregate/bool_and.rs @@ -0,0 +1,125 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_expr::aggregate; + +/// Returns true if all non-null input values are true, otherwise false. 
+/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (b1 boolean, b2 boolean, b3 boolean, b4 boolean); +/// +/// query T +/// select bool_and(b1) from t; +/// ---- +/// NULL +/// +/// statement ok +/// insert into t values +/// (true, null, false, null), +/// (false, true, null, null), +/// (null, true, false, null); +/// +/// query TTTTTT +/// select +/// bool_and(b1), +/// bool_and(b2), +/// bool_and(b3), +/// bool_and(b4), +/// bool_and(NOT b2), +/// bool_and(NOT b3) +/// FROM t; +/// ---- +/// f t f NULL f t +/// +/// statement ok +/// drop table t; +/// ``` +#[aggregate("bool_and(boolean) -> boolean", state = "ref")] +fn bool_and_append_only(state: bool, input: bool) -> bool { + state && input +} + +/// Returns true if all non-null input values are true, otherwise false. +/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (b boolean); +/// +/// statement ok +/// create materialized view mv as select bool_and(b) from t; +/// +/// query T +/// select * from mv; +/// ---- +/// NULL +/// +/// statement ok +/// insert into t values (true), (false), (null); +/// +/// query T +/// select * from mv; +/// ---- +/// f +/// +/// statement ok +/// delete from t where b is false; +/// +/// query T +/// select * from mv; +/// ---- +/// t +/// +/// statement ok +/// drop materialized view mv; +/// +/// statement ok +/// drop table t; +/// ``` +#[derive(Debug, Default, Clone)] +struct BoolAndUpdatable; + +#[aggregate("bool_and(boolean) -> boolean", state = "int8")] +impl BoolAndUpdatable { + // state is the number of false values + + fn accumulate(&self, state: i64, input: bool) -> i64 { + if input { + state + } else { + state + 1 + } + } + + fn retract(&self, state: i64, input: bool) -> i64 { + if input { + state + } else { + state - 1 + } + } + + #[allow(dead_code)] // TODO: support merge + fn merge(&self, state1: i64, state2: i64) -> i64 { + state1 + state2 + } + + fn finalize(&self, state: i64) -> bool { + state == 0 + } +} 
diff --git a/src/expr/impl/src/aggregate/bool_or.rs b/src/expr/impl/src/aggregate/bool_or.rs new file mode 100644 index 0000000000000..919ae904dbb7c --- /dev/null +++ b/src/expr/impl/src/aggregate/bool_or.rs @@ -0,0 +1,125 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_expr::aggregate; + +/// Returns true if any non-null input value is true, otherwise false. +/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (b1 boolean, b2 boolean, b3 boolean, b4 boolean); +/// +/// query T +/// select bool_or(b1) from t; +/// ---- +/// NULL +/// +/// statement ok +/// insert into t values +/// (true, null, false, null), +/// (false, true, null, null), +/// (null, true, false, null); +/// +/// query TTTTTT +/// select +/// bool_or(b1), +/// bool_or(b2), +/// bool_or(b3), +/// bool_or(b4), +/// bool_or(NOT b2), +/// bool_or(NOT b3) +/// FROM t; +/// ---- +/// t t f NULL f t +/// +/// statement ok +/// drop table t; +/// ``` +#[aggregate("bool_or(boolean) -> boolean")] +fn bool_or_append_only(state: bool, input: bool) -> bool { + state || input +} + +/// Returns true if any non-null input value is true, otherwise false. 
+/// +/// # Example +/// +/// ```slt +/// statement ok +/// create table t (b boolean); +/// +/// statement ok +/// create materialized view mv as select bool_or(b) from t; +/// +/// query T +/// select * from mv; +/// ---- +/// NULL +/// +/// statement ok +/// insert into t values (true), (false), (null); +/// +/// query T +/// select * from mv; +/// ---- +/// t +/// +/// statement ok +/// delete from t where b is true; +/// +/// query T +/// select * from mv; +/// ---- +/// f +/// +/// statement ok +/// drop materialized view mv; +/// +/// statement ok +/// drop table t; +/// ``` +#[derive(Debug, Default, Clone)] +struct BoolOrUpdatable; + +#[aggregate("bool_or(boolean) -> boolean", state = "int8")] +impl BoolOrUpdatable { + // state is the number of true values + + fn accumulate(&self, state: i64, input: bool) -> i64 { + if input { + state + 1 + } else { + state + } + } + + fn retract(&self, state: i64, input: bool) -> i64 { + if input { + state - 1 + } else { + state + } + } + + #[allow(dead_code)] // TODO: support merge + fn merge(&self, state1: i64, state2: i64) -> i64 { + state1 + state2 + } + + fn finalize(&self, state: i64) -> bool { + state != 0 + } +} diff --git a/src/expr/src/agg/general.rs b/src/expr/impl/src/aggregate/general.rs similarity index 74% rename from src/expr/src/agg/general.rs rename to src/expr/impl/src/aggregate/general.rs index 1ee2017686ee9..f47c94d45f24d 100644 --- a/src/expr/src/agg/general.rs +++ b/src/expr/impl/src/aggregate/general.rs @@ -13,41 +13,33 @@ // limitations under the License. 
use std::convert::From; -use std::ops::{BitAnd, BitOr, BitXor}; use num_traits::{CheckedAdd, CheckedSub}; -use risingwave_expr_macro::aggregate; - -use crate::{ExprError, Result}; - -#[aggregate("sum(int16) -> int64")] -#[aggregate("sum(int32) -> int64")] -#[aggregate("sum(int64) -> int64")] -#[aggregate("sum(int64) -> decimal")] -#[aggregate("sum(float32) -> float32")] -#[aggregate("sum(float64) -> float64")] +use risingwave_expr::{aggregate, ExprError, Result}; + +#[aggregate("sum(int2) -> int8")] +#[aggregate("sum(int4) -> int8")] +#[aggregate("sum(int8) -> int8")] +#[aggregate("sum(int8) -> decimal")] +#[aggregate("sum(float4) -> float4")] +#[aggregate("sum(float8) -> float8")] #[aggregate("sum(decimal) -> decimal")] #[aggregate("sum(interval) -> interval")] #[aggregate("sum(int256) -> int256")] -#[aggregate("sum0(int64) -> int64", init_state = "0i64")] -fn sum(state: Option, input: Option, retract: bool) -> Result> +#[aggregate("sum0(int8) -> int8", init_state = "0i64")] +fn sum(state: S, input: T, retract: bool) -> Result where S: Default + From + CheckedAdd + CheckedSub, { - let Some(input) = input else { - return Ok(state); - }; - let state = state.unwrap_or_default(); - let result = if retract { + if retract { state .checked_sub(&S::from(input)) - .ok_or_else(|| ExprError::NumericOutOfRange)? + .ok_or_else(|| ExprError::NumericOutOfRange) } else { state .checked_add(&S::from(input)) - .ok_or_else(|| ExprError::NumericOutOfRange)? 
- }; - Ok(Some(result)) + .ok_or_else(|| ExprError::NumericOutOfRange) + } } #[aggregate("min(*) -> auto", state = "ref")] @@ -60,30 +52,6 @@ fn max(state: T, input: T) -> T { state.max(input) } -#[aggregate("bit_and(*int) -> auto")] -fn bit_and(state: T, input: T) -> T -where - T: BitAnd, -{ - state.bitand(input) -} - -#[aggregate("bit_or(*int) -> auto")] -fn bit_or(state: T, input: T) -> T -where - T: BitOr, -{ - state.bitor(input) -} - -#[aggregate("bit_xor(*int) -> auto")] -fn bit_xor(state: T, input: T, _retract: bool) -> T -where - T: BitXor, -{ - state.bitxor(input) -} - #[aggregate("first_value(*) -> auto", state = "ref")] fn first_value(state: T, _: T) -> T { state @@ -94,6 +62,15 @@ fn last_value(_: T, input: T) -> T { input } +#[aggregate("internal_last_seen_value(*) -> auto", state = "ref")] +fn internal_last_seen_value(state: T, input: T, retract: bool) -> T { + if retract { + state + } else { + input + } +} + /// Note the following corner cases: /// /// ```slt @@ -121,7 +98,7 @@ fn last_value(_: T, input: T) -> T { /// statement ok /// drop table t; /// ``` -#[aggregate("count(*) -> int64", init_state = "0i64")] +#[aggregate("count(*) -> int8", init_state = "0i64")] fn count(state: i64, _: T, retract: bool) -> i64 { if retract { state - 1 @@ -130,7 +107,7 @@ fn count(state: i64, _: T, retract: bool) -> i64 { } } -#[aggregate("count() -> int64", init_state = "0i64")] +#[aggregate("count() -> int8", init_state = "0i64")] fn count_star(state: i64, retract: bool) -> i64 { if retract { state - 1 @@ -139,84 +116,6 @@ fn count_star(state: i64, retract: bool) -> i64 { } } -/// Returns true if all non-null input values are true, otherwise false. 
-/// -/// # Example -/// -/// ```slt -/// statement ok -/// create table t (b1 boolean, b2 boolean, b3 boolean, b4 boolean); -/// -/// query T -/// select bool_and(b1) from t; -/// ---- -/// NULL -/// -/// statement ok -/// insert into t values -/// (true, null, false, null), -/// (false, true, null, null), -/// (null, true, false, null); -/// -/// query TTTTTT -/// select -/// bool_and(b1), -/// bool_and(b2), -/// bool_and(b3), -/// bool_and(b4), -/// bool_and(NOT b2), -/// bool_and(NOT b3) -/// FROM t; -/// ---- -/// f t f NULL f t -/// -/// statement ok -/// drop table t; -/// ``` -#[aggregate("bool_and(boolean) -> boolean")] -fn bool_and(state: bool, input: bool) -> bool { - state && input -} - -/// Returns true if any non-null input value is true, otherwise false. -/// -/// # Example -/// -/// ```slt -/// statement ok -/// create table t (b1 boolean, b2 boolean, b3 boolean, b4 boolean); -/// -/// query T -/// select bool_or(b1) from t; -/// ---- -/// NULL -/// -/// statement ok -/// insert into t values -/// (true, null, false, null), -/// (false, true, null, null), -/// (null, true, false, null); -/// -/// query TTTTTT -/// select -/// bool_or(b1), -/// bool_or(b2), -/// bool_or(b3), -/// bool_or(b4), -/// bool_or(NOT b2), -/// bool_or(NOT b3) -/// FROM t; -/// ---- -/// t t f NULL f t -/// -/// statement ok -/// drop table t; -/// ``` -#[aggregate("bool_or(boolean) -> boolean")] -fn bool_or(state: bool, input: bool) -> bool { - state || input -} - #[cfg(test)] mod tests { extern crate test; @@ -227,12 +126,11 @@ mod tests { use risingwave_common::array::*; use risingwave_common::test_utils::{rand_bitmap, rand_stream_chunk}; use risingwave_common::types::{Datum, Decimal}; + use risingwave_expr::aggregate::{build_append_only, AggCall}; use test::Bencher; - use crate::agg::AggCall; - fn test_agg(pretty: &str, input: StreamChunk, expected: Datum) { - let agg = crate::agg::build(&AggCall::from_pretty(pretty)).unwrap(); + let agg = 
build_append_only(&AggCall::from_pretty(pretty)).unwrap(); let mut state = agg.create_state(); agg.update(&mut state, &input) .now_or_never() @@ -243,7 +141,7 @@ mod tests { } #[test] - fn sum_int32() { + fn sum_int4() { let input = StreamChunk::from_pretty( " i + 3 @@ -255,7 +153,7 @@ mod tests { } #[test] - fn sum_int64() { + fn sum_int8() { let input = StreamChunk::from_pretty( " I + 3 @@ -271,7 +169,7 @@ mod tests { } #[test] - fn sum_float64() { + fn sum_float8() { let input = StreamChunk::from_pretty( " F + 1.0 @@ -332,7 +230,7 @@ mod tests { } #[test] - fn min_int64() { + fn min_int8() { let input = StreamChunk::from_pretty( " I + 1 D @@ -344,7 +242,7 @@ mod tests { } #[test] - fn min_float32() { + fn min_float4() { let input = StreamChunk::from_pretty( " f + 1.0 D @@ -381,7 +279,7 @@ mod tests { } #[test] - fn max_int64() { + fn max_int8() { let input = StreamChunk::from_pretty( " I + 1 @@ -403,7 +301,7 @@ mod tests { } #[test] - fn count_int32() { + fn count_int4() { let input = StreamChunk::from_pretty( " i + 1 @@ -472,7 +370,7 @@ mod tests { } #[test] - fn bitxor_int64() { + fn bitxor_int8() { let input = StreamChunk::from_pretty( " I + 1 @@ -494,28 +392,13 @@ mod tests { "benching {} agg, chunk_size={}, vis_rate={}", agg_desc, chunk_size, vis_rate ); - let bitmap = if vis_rate < 1.0 { - Some(rand_bitmap::gen_rand_bitmap( - chunk_size, - (chunk_size as f64 * vis_rate) as usize, - 666, - )) - } else { - None - }; - let (ops, data) = rand_stream_chunk::gen_legal_stream_chunk( - bitmap.as_ref(), - chunk_size, - append_only, - 666, - ); - let vis = match bitmap { - Some(bitmap) => Vis::Bitmap(bitmap), - None => Vis::Compact(chunk_size), - }; + let vis = + rand_bitmap::gen_rand_bitmap(chunk_size, (chunk_size as f64 * vis_rate) as usize, 666); + let (ops, data) = + rand_stream_chunk::gen_legal_stream_chunk(&vis, chunk_size, append_only, 666); let chunk = StreamChunk::from_parts(ops, DataChunk::new(vec![Arc::new(data)], vis)); let pretty = 
format!("({agg_desc}:int8 $0:int8)"); - let agg = crate::agg::build(&AggCall::from_pretty(pretty)).unwrap(); + let agg = build_append_only(&AggCall::from_pretty(pretty)).unwrap(); let mut state = agg.create_state(); b.iter(|| { agg.update(&mut state, &chunk) diff --git a/src/expr/src/agg/jsonb_agg.rs b/src/expr/impl/src/aggregate/jsonb_agg.rs similarity index 95% rename from src/expr/src/agg/jsonb_agg.rs rename to src/expr/impl/src/aggregate/jsonb_agg.rs index b4975dd2c483b..8385e2c6a060b 100644 --- a/src/expr/src/agg/jsonb_agg.rs +++ b/src/expr/impl/src/aggregate/jsonb_agg.rs @@ -12,13 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_common::bail; use risingwave_common::types::JsonbVal; -use risingwave_expr_macro::aggregate; +use risingwave_expr::{aggregate, ExprError, Result}; use serde_json::Value; -use crate::{ExprError, Result}; - #[aggregate("jsonb_agg(boolean) -> jsonb")] #[aggregate("jsonb_agg(*int) -> jsonb")] #[aggregate("jsonb_agg(*float) -> jsonb")] diff --git a/src/expr/impl/src/aggregate/mod.rs b/src/expr/impl/src/aggregate/mod.rs new file mode 100644 index 0000000000000..d1373acae31b2 --- /dev/null +++ b/src/expr/impl/src/aggregate/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod approx_count_distinct; +mod array_agg; +mod bit_and; +mod bit_or; +mod bit_xor; +mod bool_and; +mod bool_or; +mod general; +mod jsonb_agg; +mod mode; +mod percentile_cont; +mod percentile_disc; +mod string_agg; diff --git a/src/expr/src/agg/mode.rs b/src/expr/impl/src/aggregate/mode.rs similarity index 94% rename from src/expr/src/agg/mode.rs rename to src/expr/impl/src/aggregate/mode.rs index 6cf26c0bb64f5..927fb5c801f87 100644 --- a/src/expr/src/agg/mode.rs +++ b/src/expr/impl/src/aggregate/mode.rs @@ -18,13 +18,12 @@ use risingwave_common::array::*; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::Row; use risingwave_common::types::*; -use risingwave_expr_macro::build_aggregate; +use risingwave_expr::aggregate::{ + AggCall, AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction, +}; +use risingwave_expr::{build_aggregate, Result}; -use super::{AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction}; -use crate::agg::AggCall; -use crate::Result; - -#[build_aggregate("mode(*) -> auto")] +#[build_aggregate("mode(any) -> any")] fn build(agg: &AggCall) -> Result { Ok(Box::new(Mode { return_type: agg.return_type.clone(), diff --git a/src/expr/src/agg/percentile_cont.rs b/src/expr/impl/src/aggregate/percentile_cont.rs similarity index 89% rename from src/expr/src/agg/percentile_cont.rs rename to src/expr/impl/src/aggregate/percentile_cont.rs index 56740695ebd14..46002d1f596f7 100644 --- a/src/expr/src/agg/percentile_cont.rs +++ b/src/expr/impl/src/aggregate/percentile_cont.rs @@ -15,13 +15,11 @@ use std::ops::Range; use risingwave_common::array::*; +use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::Row; use risingwave_common::types::*; -use risingwave_expr_macro::build_aggregate; - -use super::{AggStateDyn, AggregateFunction, AggregateState}; -use crate::agg::AggCall; -use crate::Result; +use risingwave_expr::aggregate::{AggCall, AggStateDyn, AggregateFunction, 
AggregateState}; +use risingwave_expr::{build_aggregate, Result}; /// Computes the continuous percentile, a value corresponding to the specified fraction within the /// ordered set of aggregated argument values. This will interpolate between adjacent input items if @@ -62,7 +60,7 @@ use crate::Result; /// statement ok /// drop table t; /// ``` -#[build_aggregate("percentile_cont(float64) -> float64")] +#[build_aggregate("percentile_cont(float8) -> float8")] fn build(agg: &AggCall) -> Result> { let fraction = agg.direct_args[0] .literal() @@ -74,14 +72,15 @@ pub struct PercentileCont { fraction: Option, } -type State = Vec; +#[derive(Debug, Default, EstimateSize)] +struct State(Vec); impl AggStateDyn for State {} impl PercentileCont { fn add_datum(&self, state: &mut State, datum_ref: DatumRef<'_>) { if let Some(datum) = datum_ref.to_owned_datum() { - state.push((*datum.as_float64()).into()); + state.0.push((*datum.as_float64()).into()); } } } @@ -118,7 +117,7 @@ impl AggregateFunction for PercentileCont { } async fn get_result(&self, state: &AggregateState) -> Result { - let state = state.downcast_ref::(); + let state = &state.downcast_ref::().0; Ok(if let Some(fraction) = self.fraction && !state.is_empty() { let rn = fraction * (state.len() - 1) as f64; let crn = f64::ceil(rn); diff --git a/src/expr/src/agg/percentile_disc.rs b/src/expr/impl/src/aggregate/percentile_disc.rs similarity index 94% rename from src/expr/src/agg/percentile_disc.rs rename to src/expr/impl/src/aggregate/percentile_disc.rs index 2c65b7aa52a61..c9143dcf8e640 100644 --- a/src/expr/src/agg/percentile_disc.rs +++ b/src/expr/impl/src/aggregate/percentile_disc.rs @@ -18,11 +18,10 @@ use risingwave_common::array::*; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::row::Row; use risingwave_common::types::*; -use risingwave_expr_macro::build_aggregate; - -use super::{AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction}; -use crate::agg::AggCall; -use 
crate::Result; +use risingwave_expr::aggregate::{ + AggCall, AggStateDyn, AggregateFunction, AggregateState, BoxedAggregateFunction, +}; +use risingwave_expr::{build_aggregate, Result}; /// Computes the discrete percentile, the first value within the ordered set of aggregated argument /// values whose position in the ordering equals or exceeds the specified fraction. The aggregated @@ -68,7 +67,7 @@ use crate::Result; /// statement ok /// drop table t; /// ``` -#[build_aggregate("percentile_disc(*) -> auto")] +#[build_aggregate("percentile_disc(any) -> any")] fn build(agg: &AggCall) -> Result { let fractions = agg.direct_args[0] .literal() diff --git a/src/expr/src/agg/string_agg.rs b/src/expr/impl/src/aggregate/string_agg.rs similarity index 89% rename from src/expr/src/agg/string_agg.rs rename to src/expr/impl/src/aggregate/string_agg.rs index 2c37970c7413c..6bd9c8e82ee3d 100644 --- a/src/expr/src/agg/string_agg.rs +++ b/src/expr/impl/src/aggregate/string_agg.rs @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use risingwave_common::bail; -use risingwave_expr_macro::aggregate; +use risingwave_expr::aggregate; #[aggregate("string_agg(varchar, varchar) -> varchar")] fn string_agg( @@ -34,9 +33,8 @@ fn string_agg( #[cfg(test)] mod tests { use risingwave_common::array::*; - - use crate::agg::AggCall; - use crate::Result; + use risingwave_expr::aggregate::{build_append_only, AggCall}; + use risingwave_expr::Result; #[tokio::test] async fn test_string_agg_basic() -> Result<()> { @@ -47,7 +45,7 @@ mod tests { + ccc , + ddd ,", ); - let string_agg = crate::agg::build(&AggCall::from_pretty( + let string_agg = build_append_only(&AggCall::from_pretty( "(string_agg:varchar $0:varchar $1:varchar)", ))?; let mut state = string_agg.create_state(); @@ -68,7 +66,7 @@ mod tests { + ccc _ + ddd .", ); - let string_agg = crate::agg::build(&AggCall::from_pretty( + let string_agg = build_append_only(&AggCall::from_pretty( "(string_agg:varchar $0:varchar $1:varchar)", ))?; let mut state = string_agg.create_state(); diff --git a/src/expr/impl/src/lib.rs b/src/expr/impl/src/lib.rs new file mode 100644 index 0000000000000..6ea82d30ac5f1 --- /dev/null +++ b/src/expr/impl/src/lib.rs @@ -0,0 +1,45 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Function implementations. +//! +//! To enable functions in this crate, add the following line to your code: +//! +//! ``` +//! risingwave_expr_impl::enable!(); +//! 
``` + +#![allow(non_snake_case)] // for `ctor` generated code +#![feature(let_chains)] +#![feature(assert_matches)] +#![feature(lint_reasons)] +#![feature(iterator_try_collect)] +#![feature(exclusive_range_pattern)] +#![feature(lazy_cell)] +#![feature(round_ties_even)] +#![feature(coroutines)] +#![feature(test)] +#![feature(arc_unwrap_or_clone)] + +mod aggregate; +mod scalar; +mod table_function; + +/// Enable functions in this crate. +#[macro_export] +macro_rules! enable { + () => { + use risingwave_expr_impl as _; + }; +} diff --git a/src/expr/src/vector_op/arithmetic_op.rs b/src/expr/impl/src/scalar/arithmetic_op.rs similarity index 94% rename from src/expr/src/vector_op/arithmetic_op.rs rename to src/expr/impl/src/scalar/arithmetic_op.rs index bbe6c5daa9484..f12bf6dc5e649 100644 --- a/src/expr/src/vector_op/arithmetic_op.rs +++ b/src/expr/impl/src/scalar/arithmetic_op.rs @@ -20,13 +20,11 @@ use num_traits::{CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedSub, Zer use risingwave_common::types::{ CheckedAdd, Date, Decimal, FloatExt, Interval, IsNegative, Time, Timestamp, F64, }; -use risingwave_expr_macro::function; +use risingwave_expr::{function, ExprError, Result}; use rust_decimal::MathematicalOps; -use crate::{ExprError, Result}; - #[function("add(*int, *int) -> auto")] -#[function("add(*numeric, *numeric) -> auto")] +#[function("add(decimal, decimal) -> auto")] #[function("add(*float, *float) -> auto")] #[function("add(interval, interval) -> interval")] #[function("add(int256, int256) -> int256")] @@ -42,7 +40,7 @@ where } #[function("subtract(*int, *int) -> auto")] -#[function("subtract(*numeric, *numeric) -> auto")] +#[function("subtract(decimal, decimal) -> auto")] #[function("subtract(*float, *float) -> auto")] #[function("subtract(interval, interval) -> interval")] #[function("subtract(int256, int256) -> int256")] @@ -58,7 +56,7 @@ where } #[function("multiply(*int, *int) -> auto")] -#[function("multiply(*numeric, *numeric) -> auto")] 
+#[function("multiply(decimal, decimal) -> auto")] #[function("multiply(*float, *float) -> auto")] #[function("multiply(int256, int256) -> int256")] pub fn general_mul(l: T1, r: T2) -> Result @@ -73,10 +71,10 @@ where } #[function("divide(*int, *int) -> auto")] -#[function("divide(*numeric, *numeric) -> auto")] +#[function("divide(decimal, decimal) -> auto")] #[function("divide(*float, *float) -> auto")] #[function("divide(int256, int256) -> int256")] -#[function("divide(int256, float64) -> float64")] +#[function("divide(int256, float8) -> float8")] #[function("divide(int256, *int) -> int256")] pub fn general_div(l: T1, r: T2) -> Result where @@ -96,7 +94,7 @@ where } #[function("modulus(*int, *int) -> auto")] -#[function("modulus(*numeric, *numeric) -> auto")] +#[function("modulus(decimal, decimal) -> auto")] #[function("modulus(int256, int256) -> int256")] pub fn general_mod(l: T1, r: T2) -> Result where @@ -173,7 +171,7 @@ fn err_pow_negative_fract() -> ExprError { } } -#[function("pow(float64, float64) -> float64")] +#[function("pow(float8, float8) -> float8")] pub fn pow_f64(l: F64, r: F64) -> Result { if l.is_zero() && r.0 < 0.0 { return Err(err_pow_zero_negative()); @@ -223,7 +221,7 @@ pub fn timestamp_timestamp_sub(l: Timestamp, r: Timestamp) -> Result { Ok(Interval::from_month_day_usec(0, days as i32, usecs)) } -#[function("subtract(date, date) -> int32")] +#[function("subtract(date, date) -> int4")] pub fn date_date_sub(l: Date, r: Date) -> Result { Ok((l.0 - r.0).num_days() as i32) // this does not overflow or underflow } @@ -255,7 +253,7 @@ pub fn date_interval_sub(l: Date, r: Interval) -> Result { interval_date_add(r.checked_neg().ok_or(ExprError::NumericOutOfRange)?, l) } -#[function("add(date, int32) -> date")] +#[function("add(date, int4) -> date")] pub fn date_int_add(l: Date, r: i32) -> Result { let date = l.0; let date_wrapper = date @@ -265,12 +263,12 @@ pub fn date_int_add(l: Date, r: i32) -> Result { 
date_wrapper.ok_or(ExprError::NumericOutOfRange) } -#[function("add(int32, date) -> date")] +#[function("add(int4, date) -> date")] pub fn int_date_add(l: i32, r: Date) -> Result { date_int_add(r, l) } -#[function("subtract(date, int32) -> date")] +#[function("subtract(date, int4) -> date")] pub fn date_int_sub(l: Date, r: i32) -> Result { let date = l.0; let date_wrapper = date @@ -342,7 +340,7 @@ pub fn time_interval_add(l: Time, r: Interval) -> Result::RefItem<'a>: PartialEq, + F: Fn(&str) -> ::OwnedItem, + { + let mut input = Vec::>>::new(); + let mut target = Vec::::OwnedItem>>::new(); + for i in 0..1u32 { + if i % 2 == 0 { + let s = i.to_string().into_boxed_str(); + target.push(Some(f(&s))); + input.push(Some(s)); + } else { + input.push(None); + target.push(None); + } + } + let col1_data = &input.iter().map(|x| x.as_ref().map(|x| &**x)).collect_vec(); + let col1 = Utf8Array::from_iter(col1_data).into_ref(); + let data_chunk = DataChunk::new(vec![col1], 1); + let expr = build_from_pretty("(cast:int2 $0:varchar)"); + let res = expr.eval(&data_chunk).await.unwrap(); + let arr: &A = res.as_ref().into(); + for (idx, item) in arr.iter().enumerate() { + let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); + assert_eq!(x, item); + } + + for i in 0..input.len() { + let row = OwnedRow::new(vec![input[i] + .as_ref() + .cloned() + .map(|str| str.to_scalar_value())]); + let result = expr.eval_row(&row).await.unwrap(); + let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); + assert_eq!(result, expected); + } + } + + async fn test_unary_bool(f: F, kind: PbType) + where + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + F: Fn(bool) -> ::OwnedItem, + { + let mut input = Vec::>::new(); + let mut target = Vec::::OwnedItem>>::new(); + for i in 0..100 { + if i % 2 == 0 { + input.push(Some(true)); + target.push(Some(f(true))); + } else if i % 3 == 0 { + input.push(Some(false)); + 
target.push(Some(f(false))); + } else { + input.push(None); + target.push(None); + } + } + + let col1 = BoolArray::from_iter(&input).into_ref(); + let data_chunk = DataChunk::new(vec![col1], 100); + let expr = build_from_pretty(format!("({kind:?}:boolean $0:boolean)")); + let res = expr.eval(&data_chunk).await.unwrap(); + let arr: &A = res.as_ref().into(); + for (idx, item) in arr.iter().enumerate() { + let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); + assert_eq!(x, item); + } + + for i in 0..input.len() { + let row = OwnedRow::new(vec![input[i].map(|b| b.to_scalar_value())]); + let result = expr.eval_row(&row).await.unwrap(); + let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); + assert_eq!(result, expected); + } + } + + async fn test_unary_date(f: F, kind: PbType) + where + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + F: Fn(Date) -> ::OwnedItem, + { + let mut input = Vec::>::new(); + let mut target = Vec::::OwnedItem>>::new(); + for i in 0..100 { + if i % 2 == 0 { + let date = Date::from_num_days_from_ce_uncheck(i); + input.push(Some(date)); + target.push(Some(f(date))); + } else { + input.push(None); + target.push(None); + } + } + + let col1 = DateArray::from_iter(&input).into_ref(); + let data_chunk = DataChunk::new(vec![col1], 100); + let expr = build_from_pretty(format!("({kind:?}:timestamp $0:date)")); + let res = expr.eval(&data_chunk).await.unwrap(); + let arr: &A = res.as_ref().into(); + for (idx, item) in arr.iter().enumerate() { + let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); + assert_eq!(x, item); + } + + for i in 0..input.len() { + let row = OwnedRow::new(vec![input[i].map(|d| d.to_scalar_value())]); + let result = expr.eval_row(&row).await.unwrap(); + let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); + assert_eq!(result, expected); + } + } +} diff --git a/src/expr/src/vector_op/cmp.rs b/src/expr/impl/src/scalar/cmp.rs similarity 
index 52% rename from src/expr/src/vector_op/cmp.rs rename to src/expr/impl/src/scalar/cmp.rs index 701f43db2041f..beccbc9e6766d 100644 --- a/src/expr/src/vector_op/cmp.rs +++ b/src/expr/impl/src/scalar/cmp.rs @@ -16,10 +16,13 @@ use std::fmt::Debug; use risingwave_common::array::{Array, BoolArray}; use risingwave_common::buffer::Bitmap; -use risingwave_expr_macro::function; +use risingwave_common::row::Row; +use risingwave_common::types::{Scalar, ScalarRef, ScalarRefImpl}; +use risingwave_expr::function; +#[function("equal(boolean, boolean) -> boolean", batch_fn = "boolarray_eq")] #[function("equal(*int, *int) -> boolean")] -#[function("equal(*numeric, *numeric) -> boolean")] +#[function("equal(decimal, decimal) -> boolean")] #[function("equal(*float, *float) -> boolean")] #[function("equal(int256, int256) -> boolean")] #[function("equal(serial, serial) -> boolean")] @@ -34,7 +37,7 @@ use risingwave_expr_macro::function; #[function("equal(interval, time) -> boolean")] #[function("equal(varchar, varchar) -> boolean")] #[function("equal(bytea, bytea) -> boolean")] -#[function("equal(list, list) -> boolean")] +#[function("equal(anyarray, anyarray) -> boolean")] #[function("equal(struct, struct) -> boolean")] pub fn general_eq(l: T1, r: T2) -> bool where @@ -45,8 +48,9 @@ where l.into() == r.into() } +#[function("not_equal(boolean, boolean) -> boolean", batch_fn = "boolarray_ne")] #[function("not_equal(*int, *int) -> boolean")] -#[function("not_equal(*numeric, *numeric) -> boolean")] +#[function("not_equal(decimal, decimal) -> boolean")] #[function("not_equal(*float, *float) -> boolean")] #[function("not_equal(int256, int256) -> boolean")] #[function("not_equal(serial, serial) -> boolean")] @@ -61,7 +65,7 @@ where #[function("not_equal(interval, time) -> boolean")] #[function("not_equal(varchar, varchar) -> boolean")] #[function("not_equal(bytea, bytea) -> boolean")] -#[function("not_equal(list, list) -> boolean")] +#[function("not_equal(anyarray, anyarray) -> 
boolean")] #[function("not_equal(struct, struct) -> boolean")] pub fn general_ne(l: T1, r: T2) -> bool where @@ -72,8 +76,12 @@ where l.into() != r.into() } +#[function( + "greater_than_or_equal(boolean, boolean) -> boolean", + batch_fn = "boolarray_ge" +)] #[function("greater_than_or_equal(*int, *int) -> boolean")] -#[function("greater_than_or_equal(*numeric, *numeric) -> boolean")] +#[function("greater_than_or_equal(decimal, decimal) -> boolean")] #[function("greater_than_or_equal(*float, *float) -> boolean")] #[function("greater_than_or_equal(serial, serial) -> boolean")] #[function("greater_than_or_equal(int256, int256) -> boolean")] @@ -88,7 +96,7 @@ where #[function("greater_than_or_equal(interval, time) -> boolean")] #[function("greater_than_or_equal(varchar, varchar) -> boolean")] #[function("greater_than_or_equal(bytea, bytea) -> boolean")] -#[function("greater_than_or_equal(list, list) -> boolean")] +#[function("greater_than_or_equal(anyarray, anyarray) -> boolean")] #[function("greater_than_or_equal(struct, struct) -> boolean")] pub fn general_ge(l: T1, r: T2) -> bool where @@ -99,8 +107,9 @@ where l.into() >= r.into() } +#[function("greater_than(boolean, boolean) -> boolean", batch_fn = "boolarray_gt")] #[function("greater_than(*int, *int) -> boolean")] -#[function("greater_than(*numeric, *numeric) -> boolean")] +#[function("greater_than(decimal, decimal) -> boolean")] #[function("greater_than(*float, *float) -> boolean")] #[function("greater_than(serial, serial) -> boolean")] #[function("greater_than(int256, int256) -> boolean")] @@ -115,7 +124,7 @@ where #[function("greater_than(interval, time) -> boolean")] #[function("greater_than(varchar, varchar) -> boolean")] #[function("greater_than(bytea, bytea) -> boolean")] -#[function("greater_than(list, list) -> boolean")] +#[function("greater_than(anyarray, anyarray) -> boolean")] #[function("greater_than(struct, struct) -> boolean")] pub fn general_gt(l: T1, r: T2) -> bool where @@ -126,8 +135,12 @@ where 
l.into() > r.into() } +#[function( + "less_than_or_equal(boolean, boolean) -> boolean", + batch_fn = "boolarray_le" +)] #[function("less_than_or_equal(*int, *int) -> boolean")] -#[function("less_than_or_equal(*numeric, *numeric) -> boolean")] +#[function("less_than_or_equal(decimal, decimal) -> boolean")] #[function("less_than_or_equal(*float, *float) -> boolean")] #[function("less_than_or_equal(serial, serial) -> boolean")] #[function("less_than_or_equal(int256, int256) -> boolean")] @@ -142,7 +155,7 @@ where #[function("less_than_or_equal(interval, time) -> boolean")] #[function("less_than_or_equal(varchar, varchar) -> boolean")] #[function("less_than_or_equal(bytea, bytea) -> boolean")] -#[function("less_than_or_equal(list, list) -> boolean")] +#[function("less_than_or_equal(anyarray, anyarray) -> boolean")] #[function("less_than_or_equal(struct, struct) -> boolean")] pub fn general_le(l: T1, r: T2) -> bool where @@ -153,8 +166,9 @@ where l.into() <= r.into() } +#[function("less_than(boolean, boolean) -> boolean", batch_fn = "boolarray_lt")] #[function("less_than(*int, *int) -> boolean")] -#[function("less_than(*numeric, *numeric) -> boolean")] +#[function("less_than(decimal, decimal) -> boolean")] #[function("less_than(*float, *float) -> boolean")] #[function("less_than(serial, serial) -> boolean")] #[function("less_than(int256, int256) -> boolean")] @@ -169,7 +183,7 @@ where #[function("less_than(interval, time) -> boolean")] #[function("less_than(varchar, varchar) -> boolean")] #[function("less_than(bytea, bytea) -> boolean")] -#[function("less_than(list, list) -> boolean")] +#[function("less_than(anyarray, anyarray) -> boolean")] #[function("less_than(struct, struct) -> boolean")] pub fn general_lt(l: T1, r: T2) -> bool where @@ -180,8 +194,12 @@ where l.into() < r.into() } +#[function( + "is_distinct_from(boolean, boolean) -> boolean", + batch_fn = "boolarray_is_distinct_from" +)] #[function("is_distinct_from(*int, *int) -> boolean")] 
-#[function("is_distinct_from(*numeric, *numeric) -> boolean")] +#[function("is_distinct_from(decimal, decimal) -> boolean")] #[function("is_distinct_from(*float, *float) -> boolean")] #[function("is_distinct_from(serial, serial) -> boolean")] #[function("is_distinct_from(int256, int256) -> boolean")] @@ -196,7 +214,7 @@ where #[function("is_distinct_from(interval, time) -> boolean")] #[function("is_distinct_from(varchar, varchar) -> boolean")] #[function("is_distinct_from(bytea, bytea) -> boolean")] -#[function("is_distinct_from(list, list) -> boolean")] +#[function("is_distinct_from(anyarray, anyarray) -> boolean")] #[function("is_distinct_from(struct, struct) -> boolean")] pub fn general_is_distinct_from(l: Option, r: Option) -> bool where @@ -207,8 +225,12 @@ where l.map(Into::into) != r.map(Into::into) } +#[function( + "is_not_distinct_from(boolean, boolean) -> boolean", + batch_fn = "boolarray_is_not_distinct_from" +)] #[function("is_not_distinct_from(*int, *int) -> boolean")] -#[function("is_not_distinct_from(*numeric, *numeric) -> boolean")] +#[function("is_not_distinct_from(decimal, decimal) -> boolean")] #[function("is_not_distinct_from(*float, *float) -> boolean")] #[function("is_not_distinct_from(serial, serial) -> boolean")] #[function("is_not_distinct_from(int256, int256) -> boolean")] @@ -223,7 +245,7 @@ where #[function("is_not_distinct_from(interval, time) -> boolean")] #[function("is_not_distinct_from(varchar, varchar) -> boolean")] #[function("is_not_distinct_from(bytea, bytea) -> boolean")] -#[function("is_not_distinct_from(list, list) -> boolean")] +#[function("is_not_distinct_from(anyarray, anyarray) -> boolean")] #[function("is_not_distinct_from(struct, struct) -> boolean")] pub fn general_is_not_distinct_from(l: Option, r: Option) -> bool where @@ -234,60 +256,6 @@ where l.map(Into::into) == r.map(Into::into) } -#[function("equal(boolean, boolean) -> boolean", batch_fn = "boolarray_eq")] -pub fn boolean_eq(l: bool, r: bool) -> bool { - l == 
r -} - -#[function("not_equal(boolean, boolean) -> boolean", batch_fn = "boolarray_ne")] -pub fn boolean_ne(l: bool, r: bool) -> bool { - l != r -} - -#[function( - "greater_than_or_equal(boolean, boolean) -> boolean", - batch_fn = "boolarray_ge" -)] -pub fn boolean_ge(l: bool, r: bool) -> bool { - l >= r -} - -#[allow(clippy::bool_comparison)] -#[function("greater_than(boolean, boolean) -> boolean", batch_fn = "boolarray_gt")] -pub fn boolean_gt(l: bool, r: bool) -> bool { - l > r -} - -#[function( - "less_than_or_equal(boolean, boolean) -> boolean", - batch_fn = "boolarray_le" -)] -pub fn boolean_le(l: bool, r: bool) -> bool { - l <= r -} - -#[allow(clippy::bool_comparison)] -#[function("less_than(boolean, boolean) -> boolean", batch_fn = "boolarray_lt")] -pub fn boolean_lt(l: bool, r: bool) -> bool { - l < r -} - -#[function( - "is_distinct_from(boolean, boolean) -> boolean", - batch_fn = "boolarray_is_distinct_from" -)] -pub fn boolean_is_distinct_from(l: Option, r: Option) -> bool { - l != r -} - -#[function( - "is_not_distinct_from(boolean, boolean) -> boolean", - batch_fn = "boolarray_is_not_distinct_from" -)] -pub fn boolean_is_not_distinct_from(l: Option, r: Option) -> bool { - l == r -} - #[function("is_true(boolean) -> boolean", batch_fn = "boolarray_is_true")] pub fn is_true(v: Option) -> bool { v == Some(true) @@ -311,6 +279,76 @@ pub fn is_not_false(v: Option) -> bool { v != Some(false) } +#[function("is_null(*) -> boolean", batch_fn = "batch_is_null")] +fn is_null(v: Option) -> bool { + v.is_none() +} + +#[function("is_not_null(*) -> boolean", batch_fn = "batch_is_not_null")] +fn is_not_null(v: Option) -> bool { + v.is_some() +} + +#[function("greatest(...) -> boolean")] +#[function("greatest(...) -> *int")] +#[function("greatest(...) -> decimal")] +#[function("greatest(...) -> *float")] +#[function("greatest(...) -> serial")] +#[function("greatest(...) -> int256")] +#[function("greatest(...) -> date")] +#[function("greatest(...) 
-> time")] +#[function("greatest(...) -> interval")] +#[function("greatest(...) -> timestamp")] +#[function("greatest(...) -> timestamptz")] +#[function("greatest(...) -> varchar")] +#[function("greatest(...) -> bytea")] +pub fn general_variadic_greatest(row: impl Row) -> Option +where + T: Scalar, + for<'a> ::ScalarRefType<'a>: TryFrom> + Ord + Debug, +{ + row.iter() + .flatten() + .map( + |scalar| match <::ScalarRefType<'_>>::try_from(scalar) { + Ok(v) => v, + Err(_) => unreachable!("all input type should have been aligned in the frontend"), + }, + ) + .max() + .map(|v| v.to_owned_scalar()) +} + +#[function("least(...) -> boolean")] +#[function("least(...) -> *int")] +#[function("least(...) -> decimal")] +#[function("least(...) -> *float")] +#[function("least(...) -> serial")] +#[function("least(...) -> int256")] +#[function("least(...) -> date")] +#[function("least(...) -> time")] +#[function("least(...) -> interval")] +#[function("least(...) -> timestamp")] +#[function("least(...) -> timestamptz")] +#[function("least(...) -> varchar")] +#[function("least(...) 
-> bytea")] +pub fn general_variadic_least(row: impl Row) -> Option +where + T: Scalar, + for<'a> ::ScalarRefType<'a>: TryFrom> + Ord + Debug, +{ + row.iter() + .flatten() + .map( + |scalar| match <::ScalarRefType<'_>>::try_from(scalar) { + Ok(v) => v, + Err(_) => unreachable!("all input type should have been aligned in the frontend"), + }, + ) + .min() + .map(|v| v.to_owned_scalar()) +} + // optimized functions for bool arrays fn boolarray_eq(l: &BoolArray, r: &BoolArray) -> BoolArray { @@ -377,11 +415,20 @@ fn boolarray_is_not_false(a: &BoolArray) -> BoolArray { BoolArray::new(a.data() | !a.null_bitmap(), Bitmap::ones(a.len())) } +fn batch_is_null(a: &impl Array) -> BoolArray { + BoolArray::new(!a.null_bitmap(), Bitmap::ones(a.len())) +} + +fn batch_is_not_null(a: &impl Array) -> BoolArray { + BoolArray::new(a.null_bitmap().clone(), Bitmap::ones(a.len())) +} + #[cfg(test)] mod tests { use std::str::FromStr; - use risingwave_common::types::{Decimal, F32, F64}; + use risingwave_common::types::{Decimal, Timestamp, F32, F64}; + use risingwave_expr::expr::build_from_pretty; use super::*; @@ -430,4 +477,280 @@ mod tests { fn dec(s: &str) -> Decimal { Decimal::from_str(s).unwrap() } + + #[tokio::test] + async fn test_is_distinct_from() { + let (input, target) = DataChunk::from_pretty( + " + i i B + . . f + . 1 t + 1 . t + 2 2 f + 3 4 t + ", + ) + .split_column_at(2); + let expr = build_from_pretty("(is_distinct_from:boolean $0:int4 $1:int4)"); + let result = expr.eval(&input).await.unwrap(); + assert_eq!(&result, target.column_at(0)); + } + + #[tokio::test] + async fn test_is_not_distinct_from() { + let (input, target) = DataChunk::from_pretty( + " + i i B + . . t + . 1 f + 1 . 
f + 2 2 t + 3 4 f + ", + ) + .split_column_at(2); + let expr = build_from_pretty("(is_not_distinct_from:boolean $0:int4 $1:int4)"); + let result = expr.eval(&input).await.unwrap(); + assert_eq!(&result, target.column_at(0)); + } + + use risingwave_common::array::*; + use risingwave_common::row::OwnedRow; + use risingwave_common::types::test_utils::IntervalTestExt; + use risingwave_common::types::{Date, Interval, Scalar}; + use risingwave_pb::expr::expr_node::Type; + + use crate::scalar::arithmetic_op::{date_interval_add, date_interval_sub}; + + #[tokio::test] + async fn test_binary() { + test_binary_i32::(|x, y| x + y, Type::Add).await; + test_binary_i32::(|x, y| x - y, Type::Subtract).await; + test_binary_i32::(|x, y| x * y, Type::Multiply).await; + test_binary_i32::(|x, y| x / y, Type::Divide).await; + test_binary_i32::(|x, y| x == y, Type::Equal).await; + test_binary_i32::(|x, y| x != y, Type::NotEqual).await; + test_binary_i32::(|x, y| x > y, Type::GreaterThan).await; + test_binary_i32::(|x, y| x >= y, Type::GreaterThanOrEqual).await; + test_binary_i32::(|x, y| x < y, Type::LessThan).await; + test_binary_i32::(|x, y| x <= y, Type::LessThanOrEqual).await; + test_binary_inner::( + reduce(std::cmp::max::), + Type::Greatest, + ) + .await; + test_binary_inner::( + reduce(std::cmp::min::), + Type::Least, + ) + .await; + test_binary_inner::( + reduce(std::cmp::max::), + Type::Greatest, + ) + .await; + test_binary_inner::( + reduce(std::cmp::min::), + Type::Least, + ) + .await; + test_binary_decimal::(|x, y| x + y, Type::Add).await; + test_binary_decimal::(|x, y| x - y, Type::Subtract).await; + test_binary_decimal::(|x, y| x * y, Type::Multiply).await; + test_binary_decimal::(|x, y| x / y, Type::Divide).await; + test_binary_decimal::(|x, y| x == y, Type::Equal).await; + test_binary_decimal::(|x, y| x != y, Type::NotEqual).await; + test_binary_decimal::(|x, y| x > y, Type::GreaterThan).await; + test_binary_decimal::(|x, y| x >= y, Type::GreaterThanOrEqual).await; + 
test_binary_decimal::(|x, y| x < y, Type::LessThan).await; + test_binary_decimal::(|x, y| x <= y, Type::LessThanOrEqual).await; + test_binary_interval::( + |x, y| date_interval_add(x, y).unwrap(), + Type::Add, + ) + .await; + test_binary_interval::( + |x, y| date_interval_sub(x, y).unwrap(), + Type::Subtract, + ) + .await; + } + + trait TestFrom: Copy { + const NAME: &'static str; + fn test_from(i: usize) -> Self; + } + + impl TestFrom for i32 { + const NAME: &'static str = "int4"; + + fn test_from(i: usize) -> Self { + i as i32 + } + } + + impl TestFrom for Decimal { + const NAME: &'static str = "decimal"; + + fn test_from(i: usize) -> Self { + i.into() + } + } + + impl TestFrom for bool { + const NAME: &'static str = "boolean"; + + fn test_from(i: usize) -> Self { + i % 2 == 0 + } + } + + impl TestFrom for Timestamp { + const NAME: &'static str = "timestamp"; + + fn test_from(_: usize) -> Self { + unimplemented!("not implemented as input yet") + } + } + + impl TestFrom for Interval { + const NAME: &'static str = "interval"; + + fn test_from(i: usize) -> Self { + Interval::from_ymd(0, i as _, i as _) + } + } + + impl TestFrom for Date { + const NAME: &'static str = "date"; + + fn test_from(i: usize) -> Self { + Date::from_num_days_from_ce_uncheck(i as i32) + } + } + + #[expect(clippy::type_complexity)] + fn gen_test_data( + count: usize, + f: impl Fn(Option, Option) -> Option, + ) -> (Vec>, Vec>, Vec>) { + let mut lhs = Vec::>::new(); + let mut rhs = Vec::>::new(); + let mut target = Vec::>::new(); + for i in 0..count { + let (l, r) = if i % 2 == 0 { + (Some(i), None) + } else if i % 3 == 0 { + (Some(i), Some(i + 1)) + } else if i % 5 == 0 { + (Some(i + 1), Some(i)) + } else if i % 7 == 0 { + (None, Some(i)) + } else { + (Some(i), Some(i)) + }; + let l = l.map(TestFrom::test_from); + let r = r.map(TestFrom::test_from); + lhs.push(l); + rhs.push(r); + target.push(f(l, r)); + } + (lhs, rhs, target) + } + + fn arithmetic(f: impl Fn(L, R) -> O) -> impl Fn(Option, 
Option) -> Option { + move |l, r| match (l, r) { + (Some(l), Some(r)) => Some(f(l, r)), + _ => None, + } + } + + fn reduce(f: impl Fn(I, I) -> I) -> impl Fn(Option, Option) -> Option { + move |l, r| match (l, r) { + (Some(l), Some(r)) => Some(f(l, r)), + (Some(l), None) => Some(l), + (None, Some(r)) => Some(r), + (None, None) => None, + } + } + + async fn test_binary_inner(f: F, kind: Type) + where + L: Array, + L: for<'a> FromIterator<&'a Option<::OwnedItem>>, + ::OwnedItem: TestFrom, + R: Array, + R: for<'a> FromIterator<&'a Option<::OwnedItem>>, + ::OwnedItem: TestFrom, + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + ::OwnedItem: TestFrom, + F: Fn( + Option<::OwnedItem>, + Option<::OwnedItem>, + ) -> Option<::OwnedItem>, + { + let (lhs, rhs, target) = gen_test_data(100, f); + + let col1 = L::from_iter(&lhs).into_ref(); + let col2 = R::from_iter(&rhs).into_ref(); + let data_chunk = DataChunk::new(vec![col1, col2], 100); + let l_name = <::OwnedItem as TestFrom>::NAME; + let r_name = <::OwnedItem as TestFrom>::NAME; + let output_name = <::OwnedItem as TestFrom>::NAME; + let expr = build_from_pretty(format!( + "({name}:{output_name} $0:{l_name} $1:{r_name})", + name = kind.as_str_name(), + )); + let res = expr.eval(&data_chunk).await.unwrap(); + let arr: &A = res.as_ref().into(); + for (idx, item) in arr.iter().enumerate() { + let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); + assert_eq!(x, item); + } + + for i in 0..lhs.len() { + let row = OwnedRow::new(vec![ + lhs[i].map(|int| int.to_scalar_value()), + rhs[i].map(|int| int.to_scalar_value()), + ]); + let result = expr.eval_row(&row).await.unwrap(); + let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); + assert_eq!(result, expected); + } + } + + async fn test_binary_i32(f: F, kind: Type) + where + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + ::OwnedItem: TestFrom, + F: Fn(i32, 
i32) -> ::OwnedItem, + { + test_binary_inner::(arithmetic(f), kind).await + } + + async fn test_binary_interval(f: F, kind: Type) + where + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + ::OwnedItem: TestFrom, + F: Fn(Date, Interval) -> ::OwnedItem, + { + test_binary_inner::(arithmetic(f), kind).await + } + + async fn test_binary_decimal(f: F, kind: Type) + where + A: Array, + for<'a> &'a A: std::convert::From<&'a ArrayImpl>, + for<'a> ::RefItem<'a>: PartialEq, + ::OwnedItem: TestFrom, + F: Fn(Decimal, Decimal) -> ::OwnedItem, + { + test_binary_inner::(arithmetic(f), kind).await + } } diff --git a/src/expr/src/vector_op/concat_op.rs b/src/expr/impl/src/scalar/concat_op.rs similarity index 90% rename from src/expr/src/vector_op/concat_op.rs rename to src/expr/impl/src/scalar/concat_op.rs index b6d362538a51f..ebfb726af4caf 100644 --- a/src/expr/src/vector_op/concat_op.rs +++ b/src/expr/impl/src/scalar/concat_op.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("concat_op(varchar, varchar) -> varchar")] -pub fn concat_op(left: &str, right: &str, writer: &mut dyn Write) { +pub fn concat_op(left: &str, right: &str, writer: &mut impl Write) { writer.write_str(left).unwrap(); writer.write_str(right).unwrap(); } diff --git a/src/expr/impl/src/scalar/concat_ws.rs b/src/expr/impl/src/scalar/concat_ws.rs new file mode 100644 index 0000000000000..e6f5e2d6b4870 --- /dev/null +++ b/src/expr/impl/src/scalar/concat_ws.rs @@ -0,0 +1,69 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::Write; + +use risingwave_common::row::Row; +use risingwave_common::types::ToText; +use risingwave_expr::function; + +/// Concatenates all but the first argument, with separators. The first argument is used as the +/// separator string, and should not be NULL. Other NULL arguments are ignored. +#[function("concat_ws(varchar, ...) -> varchar")] +fn concat_ws(sep: &str, vals: impl Row, writer: &mut impl Write) -> Option<()> { + let mut string_iter = vals.iter().flatten(); + if let Some(string) = string_iter.next() { + string.write(writer).unwrap(); + } + for string in string_iter { + write!(writer, "{}", sep).unwrap(); + string.write(writer).unwrap(); + } + Some(()) +} + +#[cfg(test)] +mod tests { + use risingwave_common::array::DataChunk; + use risingwave_common::row::Row; + use risingwave_common::test_prelude::DataChunkTestExt; + use risingwave_common::types::ToOwnedDatum; + use risingwave_common::util::iter_util::ZipEqDebug; + use risingwave_expr::expr::build_from_pretty; + + #[tokio::test] + async fn test_concat_ws() { + let concat_ws = + build_from_pretty("(concat_ws:varchar $0:varchar $1:varchar $2:varchar $3:varchar)"); + let (input, expected) = DataChunk::from_pretty( + "T T T T T + , a b c a,b,c + , . b c b,c + . a b c . + , . . . (empty) + . . . . 
.", + ) + .split_column_at(4); + + // test eval + let output = concat_ws.eval(&input).await.unwrap(); + assert_eq!(&output, expected.column_at(0)); + + // test eval_row + for (row, expected) in input.rows().zip_eq_debug(expected.rows()) { + let result = concat_ws.eval_row(&row.to_owned_row()).await.unwrap(); + assert_eq!(result, expected.datum_at(0).to_owned_datum()); + } + } +} diff --git a/src/expr/src/vector_op/format_type.rs b/src/expr/impl/src/scalar/conjunction.rs similarity index 58% rename from src/expr/src/vector_op/format_type.rs rename to src/expr/impl/src/scalar/conjunction.rs index a8e497e6aaacf..2786e1579ba62 100644 --- a/src/expr/src/vector_op/format_type.rs +++ b/src/expr/impl/src/scalar/conjunction.rs @@ -12,15 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_common::types::DataType; -use risingwave_expr_macro::function; +use risingwave_expr::function; -#[function("format_type(int32, int32) -> varchar")] -pub fn format_type(oid: Option, _typemod: Option) -> Option> { - // since we don't support type modifier, ignore it. - oid.map(|i| { - DataType::from_oid(i) - .map(|dt| format!("{}", dt).into_boxed_str()) - .unwrap_or("???".into()) - }) +#[function("not(boolean) -> boolean")] +pub fn not(v: bool) -> bool { + !v +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_not() { + assert!(!not(true)); + assert!(not(false)); + } } diff --git a/src/expr/src/vector_op/date_trunc.rs b/src/expr/impl/src/scalar/date_trunc.rs similarity index 95% rename from src/expr/src/vector_op/date_trunc.rs rename to src/expr/impl/src/scalar/date_trunc.rs index 6700c793a8c0b..ffc24fbfc8137 100644 --- a/src/expr/src/vector_op/date_trunc.rs +++ b/src/expr/impl/src/scalar/date_trunc.rs @@ -13,10 +13,10 @@ // limitations under the License. 
use risingwave_common::types::{Interval, Timestamp, Timestamptz}; -use risingwave_expr_macro::{build_function, function}; +use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::{build_function, function, ExprError, Result}; use super::timestamptz::{timestamp_at_time_zone, timestamptz_at_time_zone}; -use crate::{ExprError, Result}; #[function("date_trunc(varchar, timestamp) -> timestamp")] pub fn date_trunc_timestamp(field: &str, ts: Timestamp) -> Result { @@ -42,8 +42,8 @@ pub fn date_trunc_timestamp(field: &str, ts: Timestamp) -> Result { #[build_function("date_trunc(varchar, timestamptz) -> timestamptz")] fn build_date_trunc_timestamptz_implicit_zone( _return_type: risingwave_common::types::DataType, - _children: Vec, -) -> Result { + _children: Vec, +) -> Result { Err(ExprError::UnsupportedFunction( "date_trunc of timestamptz should have been rewritten to include timezone".into(), )) diff --git a/src/expr/impl/src/scalar/delay.rs b/src/expr/impl/src/scalar/delay.rs new file mode 100644 index 0000000000000..ff963ffdd8560 --- /dev/null +++ b/src/expr/impl/src/scalar/delay.rs @@ -0,0 +1,52 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::time::Duration; + +use risingwave_common::types::{Interval, F64}; +use risingwave_expr::function; + +/// Makes the current session's process sleep until the given number of seconds have elapsed. 
+/// +/// ```slt +/// query I +/// SELECT pg_sleep(1.5); +/// ---- +/// NULL +/// ``` +#[function("pg_sleep(float8)", volatile)] +async fn pg_sleep(second: F64) { + tokio::time::sleep(Duration::from_secs_f64(second.0)).await; +} + +/// Makes the current session's process sleep until the given interval has elapsed. +/// +/// ```slt +/// query I +/// SELECT pg_sleep_for('1 second'); +/// ---- +/// NULL +/// ``` +#[function("pg_sleep_for(interval)", volatile)] +async fn pg_sleep_for(interval: Interval) { + // we only use the microsecond part of the interval + let usecs = if interval.is_positive() { + interval.usecs() as u64 + } else { + // return if the interval is not positive + return; + }; + let duration = Duration::from_micros(usecs); + tokio::time::sleep(duration).await; +} diff --git a/src/expr/src/vector_op/encdec.rs b/src/expr/impl/src/scalar/encdec.rs similarity index 97% rename from src/expr/src/vector_op/encdec.rs rename to src/expr/impl/src/scalar/encdec.rs index 7ea9d27e4778a..36faa428d80c2 100644 --- a/src/expr/src/vector_op/encdec.rs +++ b/src/expr/impl/src/scalar/encdec.rs @@ -14,11 +14,8 @@ use std::fmt::Write; -use hex; use risingwave_common::cast::{parse_bytes_hex, parse_bytes_traditional}; -use risingwave_expr_macro::function; - -use crate::{ExprError, Result}; +use risingwave_expr::{function, ExprError, Result}; const PARSE_BASE64_INVALID_END: &str = "invalid base64 end sequence"; const PARSE_BASE64_INVALID_PADDING: &str = "unexpected \"=\" while decoding base64 sequence"; @@ -38,7 +35,7 @@ const PARSE_BASE64_ALPHABET_DECODE_TABLE: [u8; 123] = [ ]; #[function("encode(bytea, varchar) -> varchar")] -pub fn encode(data: &[u8], format: &str, writer: &mut dyn Write) -> Result<()> { +pub fn encode(data: &[u8], format: &str, writer: &mut impl Write) -> Result<()> { match format { "base64" => { encode_bytes_base64(data, writer)?; @@ -78,7 +75,7 @@ pub fn decode(data: &str, format: &str) -> Result> { // According to 
https://www.postgresql.org/docs/current/functions-binarystring.html#ENCODE-FORMAT-BASE64 // We need to split newlines when the output length is greater than or equal to 76 -fn encode_bytes_base64(data: &[u8], writer: &mut dyn Write) -> Result<()> { +fn encode_bytes_base64(data: &[u8], writer: &mut impl Write) -> Result<()> { let mut idx: usize = 0; let len = data.len(); let mut written = 0; @@ -251,7 +248,7 @@ fn next(idx: &mut usize, data: &[u8]) -> Option { // According to https://www.postgresql.org/docs/current/functions-binarystring.html#ENCODE-FORMAT-ESCAPE // The escape format converts \0 and bytes with the high bit set into octal escape sequences (\nnn). // And doubles backslashes. -fn encode_bytes_escape(data: &[u8], writer: &mut dyn Write) -> std::fmt::Result { +fn encode_bytes_escape(data: &[u8], writer: &mut impl Write) -> std::fmt::Result { for b in data { match b { b'\0' | (b'\x80'..=b'\xff') => { diff --git a/src/expr/src/vector_op/exp.rs b/src/expr/impl/src/scalar/exp.rs similarity index 94% rename from src/expr/src/vector_op/exp.rs rename to src/expr/impl/src/scalar/exp.rs index 9430162b5b8ab..9ef7d55da1170 100644 --- a/src/expr/src/vector_op/exp.rs +++ b/src/expr/impl/src/scalar/exp.rs @@ -14,9 +14,7 @@ use num_traits::Zero; use risingwave_common::types::{Decimal, FloatExt, F64}; -use risingwave_expr_macro::function; - -use crate::{ExprError, Result}; +use risingwave_expr::{function, ExprError, Result}; fn err_logarithm_input() -> ExprError { ExprError::InvalidParam { @@ -25,7 +23,7 @@ fn err_logarithm_input() -> ExprError { } } -#[function("exp(float64) -> float64")] +#[function("exp(float8) -> float8")] pub fn exp_f64(input: F64) -> Result { // The cases where the exponent value is Inf or NaN can be handled explicitly and without // evaluating the `exp` operation. 
@@ -53,7 +51,7 @@ pub fn exp_f64(input: F64) -> Result { } } -#[function("ln(float64) -> float64")] +#[function("ln(float8) -> float8")] pub fn ln_f64(input: F64) -> Result { if input.0 <= 0.0 { return Err(err_logarithm_input()); @@ -61,7 +59,7 @@ pub fn ln_f64(input: F64) -> Result { Ok(input.ln()) } -#[function("log10(float64) -> float64")] +#[function("log10(float8) -> float8")] pub fn log10_f64(input: F64) -> Result { if input.0 <= 0.0 { return Err(err_logarithm_input()); @@ -87,9 +85,9 @@ pub fn log10_decimal(input: Decimal) -> Result { #[cfg(test)] mod tests { use risingwave_common::types::F64; + use risingwave_expr::ExprError; use super::exp_f64; - use crate::ExprError; #[test] fn legal_input() { diff --git a/src/expr/impl/src/scalar/extract.rs b/src/expr/impl/src/scalar/extract.rs new file mode 100644 index 0000000000000..d42489d1bf9ef --- /dev/null +++ b/src/expr/impl/src/scalar/extract.rs @@ -0,0 +1,520 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::str::FromStr; + +use chrono::{Datelike, NaiveTime, Timelike}; +use risingwave_common::types::{Date, Decimal, Interval, Time, Timestamp, Timestamptz, F64}; +use risingwave_expr::{function, ExprError, Result}; + +use self::Unit::*; +use crate::scalar::timestamptz::time_zone_err; + +/// Extract field from `Datelike`. 
+fn extract_from_datelike(date: impl Datelike, unit: Unit) -> Decimal { + match unit { + Millennium => ((date.year() - 1) / 1000 + 1).into(), + Century => ((date.year() - 1) / 100 + 1).into(), + Decade => (date.year() / 10).into(), + Year => date.year().into(), + IsoYear => date.iso_week().year().into(), + Quarter => ((date.month() - 1) / 3 + 1).into(), + Month => date.month().into(), + Week => date.iso_week().week().into(), + Day => date.day().into(), + Doy => date.ordinal().into(), + Dow => date.weekday().num_days_from_sunday().into(), + IsoDow => date.weekday().number_from_monday().into(), + u => unreachable!("invalid unit {:?} for date", u), + } +} + +/// Extract field from `Timelike`. +fn extract_from_timelike(time: impl Timelike, unit: Unit) -> Decimal { + let usecs = || time.second() as u64 * 1_000_000 + (time.nanosecond() / 1000) as u64; + match unit { + Hour => time.hour().into(), + Minute => time.minute().into(), + Second => Decimal::from_i128_with_scale(usecs() as i128, 6), + Millisecond => Decimal::from_i128_with_scale(usecs() as i128, 3), + Microsecond => usecs().into(), + Epoch => { + let usecs = time.num_seconds_from_midnight() as u64 * 1_000_000 + + (time.nanosecond() / 1000) as u64; + Decimal::from_i128_with_scale(usecs as i128, 6) + } + u => unreachable!("invalid unit {:?} for time", u), + } +} + +#[function( + "extract(varchar, date) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_date()?" +)] +fn extract_from_date(date: Date, unit: &Unit) -> Decimal { + match unit { + Epoch => { + let epoch = date.0.and_time(NaiveTime::default()).timestamp(); + epoch.into() + } + Julian => { + const UNIX_EPOCH_DAY: i32 = 719_163; + let julian = date.0.num_days_from_ce() - UNIX_EPOCH_DAY + 2_440_588; + julian.into() + } + _ => extract_from_datelike(date.0, *unit), + } +} + +#[function( + "extract(varchar, time) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_time()?" 
+)] +fn extract_from_time(time: Time, unit: &Unit) -> Decimal { + extract_from_timelike(time.0, *unit) +} + +#[function( + "extract(varchar, timestamp) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_timestamp()?" +)] +fn extract_from_timestamp(timestamp: Timestamp, unit: &Unit) -> Decimal { + match unit { + Epoch => { + let epoch = timestamp.0.timestamp_micros(); + Decimal::from_i128_with_scale(epoch as i128, 6) + } + Julian => { + let epoch = Decimal::from_i128_with_scale(timestamp.0.timestamp_micros() as i128, 6); + epoch / (24 * 60 * 60).into() + 2_440_588.into() + } + _ if unit.is_date_unit() => extract_from_datelike(timestamp.0.date(), *unit), + _ if unit.is_time_unit() => extract_from_timelike(timestamp.0.time(), *unit), + u => unreachable!("invalid unit {:?} for timestamp", u), + } +} + +#[function( + "extract(varchar, timestamptz) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_timestamptz()?" +)] +fn extract_from_timestamptz(tz: Timestamptz, unit: &Unit) -> Decimal { + match unit { + Epoch => Decimal::from_i128_with_scale(tz.timestamp_micros() as _, 6), + // TODO(#5826): all other units depend on implicit session TimeZone + u => unreachable!("invalid unit {u:?} for timestamp with time zone"), + } +} + +#[function( + "extract(varchar, timestamptz, varchar) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_timestamptz_at_timezone()?" 
+)] +fn extract_from_timestamptz_at_timezone( + input: Timestamptz, + timezone: &str, + unit: &Unit, +) -> Result { + use chrono::Offset as _; + + let time_zone = Timestamptz::lookup_time_zone(timezone).map_err(time_zone_err)?; + let instant_local = input.to_datetime_in_zone(time_zone); + + Ok(match unit { + Epoch => Decimal::from_i128_with_scale(instant_local.timestamp_micros() as _, 6), + Timezone => { + let east_secs = instant_local.offset().fix().local_minus_utc(); + east_secs.into() + } + Timezone_Hour => { + let east_secs = instant_local.offset().fix().local_minus_utc(); + (east_secs / 3600).into() + } + Timezone_Minute => { + let east_secs = instant_local.offset().fix().local_minus_utc(); + (east_secs % 3600 / 60).into() + } + _ => extract_from_timestamp(instant_local.naive_local().into(), unit), + }) +} + +#[function( + "extract(varchar, interval) -> decimal", + prebuild = "Unit::from_str($0)?.ensure_interval()?" +)] +fn extract_from_interval(interval: Interval, unit: &Unit) -> Decimal { + match unit { + Millennium => (interval.years_field() / 1000).into(), + Century => (interval.years_field() / 100).into(), + Decade => (interval.years_field() / 10).into(), + Year => interval.years_field().into(), + Quarter => (interval.months_field() / 3 + 1).into(), + Month => interval.months_field().into(), + Day => interval.days_field().into(), + Hour => interval.hours_field().into(), + Minute => interval.minutes_field().into(), + Second => Decimal::from_i128_with_scale(interval.seconds_in_micros() as i128, 6), + Millisecond => Decimal::from_i128_with_scale(interval.seconds_in_micros() as i128, 3), + Microsecond => interval.seconds_in_micros().into(), + Epoch => Decimal::from_i128_with_scale(interval.epoch_in_micros(), 6), + u => unreachable!("invalid unit {:?} for interval", u), + } +} + +#[function( + "date_part(varchar, date) -> float8", + prebuild = "Unit::from_str($0)?.ensure_date()?" 
+)] +fn date_part_from_date(date: Date, unit: &Unit) -> Result { + // date_part of date manually cast to timestamp + // https://github.com/postgres/postgres/blob/REL_15_2/src/backend/catalog/system_functions.sql#L123 + extract_from_timestamp(date.into(), unit) + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +#[function( + "date_part(varchar, time) -> float8", + prebuild = "Unit::from_str($0)?.ensure_time()?" +)] +fn date_part_from_time(time: Time, unit: &Unit) -> Result { + extract_from_time(time, unit) + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +#[function( + "date_part(varchar, timestamp) -> float8", + prebuild = "Unit::from_str($0)?.ensure_timestamp()?" +)] +fn date_part_from_timestamp(timestamp: Timestamp, unit: &Unit) -> Result { + extract_from_timestamp(timestamp, unit) + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +#[function( + "date_part(varchar, timestamptz) -> float8", + prebuild = "Unit::from_str($0)?.ensure_timestamptz()?" +)] +fn date_part_from_timestamptz(input: Timestamptz, unit: &Unit) -> Result { + extract_from_timestamptz(input, unit) + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +#[function( + "date_part(varchar, timestamptz, varchar) -> float8", + prebuild = "Unit::from_str($0)?.ensure_timestamptz_at_timezone()?" +)] +fn date_part_from_timestamptz_at_timezone( + input: Timestamptz, + timezone: &str, + unit: &Unit, +) -> Result { + extract_from_timestamptz_at_timezone(input, timezone, unit)? + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +#[function( + "date_part(varchar, interval) -> float8", + prebuild = "Unit::from_str($0)?.ensure_interval()?" +)] +fn date_part_from_interval(interval: Interval, unit: &Unit) -> Result { + extract_from_interval(interval, unit) + .try_into() + .map_err(|_| ExprError::NumericOutOfRange) +} + +/// Define an enum and its `FromStr` impl. +macro_rules! 
define_unit { + ($(#[ $attr:meta ])* enum $name:ident { $($variant:ident,)* }) => { + $(#[$attr])* + #[derive(Debug, PartialEq, Eq, Clone, Copy)] + enum $name { + $($variant,)* + } + + impl FromStr for $name { + type Err = ExprError; + + fn from_str(s: &str) -> Result { + $( + if s.eq_ignore_ascii_case(stringify!($variant)) { + return Ok(Self::$variant); + } + )* + Err(invalid_unit(s)) + } + } + }; +} + +define_unit! { + /// Datetime units. + #[allow(non_camel_case_types)] + enum Unit { + Millennium, + Century, + Decade, + Year, + IsoYear, + Quarter, + Month, + Week, + Day, + Doy, + Dow, + IsoDow, + Hour, + Minute, + Second, + Millisecond, + Microsecond, + Epoch, + Julian, + Timezone, + Timezone_Hour, + Timezone_Minute, + } +} + +impl Unit { + /// Whether the unit is a valid date unit. + #[rustfmt::skip] + const fn is_date_unit(self) -> bool { + matches!( + self, + Millennium | Century | Decade | Year | IsoYear | Quarter | Month | Week + | Day | Doy | Dow | IsoDow | Epoch | Julian + ) + } + + /// Whether the unit is a valid time unit. + const fn is_time_unit(self) -> bool { + matches!( + self, + Hour | Minute | Second | Millisecond | Microsecond | Epoch + ) + } + + /// Whether the unit is a valid timestamp unit. + const fn is_timestamp_unit(self) -> bool { + self.is_date_unit() || self.is_time_unit() + } + + /// Whether the unit is a valid timestamptz unit. + const fn is_timestamptz_unit(self) -> bool { + matches!(self, Epoch) + } + + /// Whether the unit is a valid timestamptz at timezone unit. + const fn is_timestamptz_at_timezone_unit(self) -> bool { + self.is_timestamp_unit() || matches!(self, Timezone | Timezone_Hour | Timezone_Minute) + } + + /// Whether the unit is a valid interval unit. + #[rustfmt::skip] + const fn is_interval_unit(self) -> bool { + matches!( + self, + Millennium | Century | Decade | Year | Quarter | Month | Day | Hour | Minute + | Second | Millisecond | Microsecond | Epoch + ) + } + + /// Ensure the unit is a valid date unit. 
+ fn ensure_date(self) -> Result { + if self.is_date_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "date")) + } + } + + /// Ensure the unit is a valid time unit. + fn ensure_time(self) -> Result { + if self.is_time_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "time")) + } + } + + /// Ensure the unit is a valid timestamp unit. + fn ensure_timestamp(self) -> Result { + if self.is_timestamp_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "timestamp")) + } + } + + /// Ensure the unit is a valid timestamptz unit. + fn ensure_timestamptz(self) -> Result { + if self.is_timestamptz_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "timestamp with time zone")) + } + } + + /// Ensure the unit is a valid timestamptz unit. + fn ensure_timestamptz_at_timezone(self) -> Result { + if self.is_timestamptz_at_timezone_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "timestamp with time zone")) + } + } + + /// Ensure the unit is a valid interval unit. 
+ fn ensure_interval(self) -> Result { + if self.is_interval_unit() { + Ok(self) + } else { + Err(unsupported_unit(self, "interval")) + } + } +} + +fn invalid_unit(unit: &str) -> ExprError { + ExprError::InvalidParam { + name: "unit", + reason: format!("unit \"{unit}\" not recognized").into(), + } +} + +fn unsupported_unit(unit: Unit, type_: &str) -> ExprError { + ExprError::InvalidParam { + name: "unit", + reason: format!("unit \"{unit:?}\" not supported for type {type_}").into(), + } +} + +#[cfg(test)] +mod tests { + use chrono::{NaiveDate, NaiveDateTime}; + + use super::*; + + #[test] + fn test_extract_from_date() { + let date = Date::new(NaiveDate::parse_from_str("2021-11-22", "%Y-%m-%d").unwrap()); + let extract = |i| extract_from_date(date, &i).to_string(); + assert_eq!(extract(Day), "22"); + assert_eq!(extract(Month), "11"); + assert_eq!(extract(Year), "2021"); + assert_eq!(extract(Dow), "1"); + assert_eq!(extract(Doy), "326"); + assert_eq!(extract(Millennium), "3"); + assert_eq!(extract(Century), "21"); + assert_eq!(extract(Decade), "202"); + assert_eq!(extract(IsoYear), "2021"); + assert_eq!(extract(Quarter), "4"); + assert_eq!(extract(Week), "47"); + assert_eq!(extract(IsoDow), "1"); + assert_eq!(extract(Epoch), "1637539200"); + assert_eq!(extract(Julian), "2459541"); + } + + #[test] + fn test_extract_from_time() { + let time: Time = "23:22:57.123450".parse().unwrap(); + let extract = |unit| extract_from_time(time, &unit).to_string(); + assert_eq!(extract(Hour), "23"); + assert_eq!(extract(Minute), "22"); + assert_eq!(extract(Second), "57.123450"); + assert_eq!(extract(Millisecond), "57123.450"); + assert_eq!(extract(Microsecond), "57123450"); + assert_eq!(extract(Epoch), "84177.123450"); + } + + #[test] + fn test_extract_from_timestamp() { + let ts = Timestamp::new( + NaiveDateTime::parse_from_str("2021-11-22 12:4:2.575400", "%Y-%m-%d %H:%M:%S%.f") + .unwrap(), + ); + let extract = |unit| extract_from_timestamp(ts, &unit).to_string(); + 
assert_eq!(extract(Millennium), "3"); + assert_eq!(extract(Century), "21"); + assert_eq!(extract(Decade), "202"); + assert_eq!(extract(IsoYear), "2021"); + assert_eq!(extract(Year), "2021"); + assert_eq!(extract(Quarter), "4"); + assert_eq!(extract(Month), "11"); + assert_eq!(extract(Week), "47"); + assert_eq!(extract(Day), "22"); + assert_eq!(extract(Dow), "1"); + assert_eq!(extract(IsoDow), "1"); + assert_eq!(extract(Doy), "326"); + assert_eq!(extract(Hour), "12"); + assert_eq!(extract(Minute), "4"); + assert_eq!(extract(Second), "2.575400"); + assert_eq!(extract(Millisecond), "2575.400"); + assert_eq!(extract(Microsecond), "2575400"); + assert_eq!(extract(Epoch), "1637582642.575400"); + assert_eq!(extract(Julian), "2459541.5028075856481481481481"); + } + + #[test] + fn test_extract_from_timestamptz() { + let ts: Timestamptz = "2023-06-01 00:00:00Z".parse().unwrap(); + let extract = |unit| { + extract_from_timestamptz_at_timezone(ts, "pst8pdt", &unit) + .unwrap() + .to_string() + }; + assert_eq!(extract(Timezone), "-25200"); + assert_eq!(extract(Timezone_Hour), "-7"); + assert_eq!(extract(Timezone_Minute), "0"); + } + + #[test] + fn test_extract_from_interval() { + let interval: Interval = "2345 years 1 mon 250 days 23:22:57.123450".parse().unwrap(); + let extract = |unit| extract_from_interval(interval, &unit).to_string(); + assert_eq!(extract(Millennium), "2"); + assert_eq!(extract(Century), "23"); + assert_eq!(extract(Decade), "234"); + assert_eq!(extract(Year), "2345"); + assert_eq!(extract(Month), "1"); + assert_eq!(extract(Day), "250"); + assert_eq!(extract(Hour), "23"); + assert_eq!(extract(Minute), "22"); + assert_eq!(extract(Second), "57.123450"); + assert_eq!(extract(Millisecond), "57123.450"); + assert_eq!(extract(Microsecond), "57123450"); + assert_eq!(extract(Epoch), "74026848177.123450"); + + let interval: Interval = "-2345 years -1 mon -250 days -23:22:57.123450" + .parse() + .unwrap(); + let extract = |unit| extract_from_interval(interval, 
&unit).to_string(); + assert_eq!(extract(Millennium), "-2"); + assert_eq!(extract(Century), "-23"); + assert_eq!(extract(Decade), "-234"); + assert_eq!(extract(Year), "-2345"); + assert_eq!(extract(Month), "-1"); + assert_eq!(extract(Day), "-250"); + assert_eq!(extract(Hour), "-23"); + assert_eq!(extract(Minute), "-22"); + assert_eq!(extract(Second), "-57.123450"); + assert_eq!(extract(Millisecond), "-57123.450"); + assert_eq!(extract(Microsecond), "-57123450"); + assert_eq!(extract(Epoch), "-74026848177.123450"); + } +} diff --git a/src/common/src/format.rs b/src/expr/impl/src/scalar/format.rs similarity index 50% rename from src/common/src/format.rs rename to src/expr/impl/src/scalar/format.rs index 4bd5e8c905a4d..2110bbf0db9e5 100644 --- a/src/common/src/format.rs +++ b/src/expr/impl/src/scalar/format.rs @@ -12,7 +12,52 @@ // See the License for the specific language governing permissions and // limitations under the License. -use thiserror::Error; +use std::fmt::Write; +use std::str::FromStr; + +use risingwave_common::row::Row; +use risingwave_common::types::{ScalarRefImpl, ToText}; +use risingwave_expr::{function, ExprError, Result}; + +use super::string::quote_ident; + +/// Formats arguments according to a format string. +#[function( + "format(varchar, ...) -> varchar", + prebuild = "Formatter::from_str($0).map_err(|e| ExprError::Parse(e.to_string().into()))?" 
+)] +fn format(formatter: &Formatter, row: impl Row, writer: &mut impl Write) -> Result<()> { + let mut args = row.iter(); + for node in &formatter.nodes { + match node { + FormatterNode::Literal(literal) => writer.write_str(literal).unwrap(), + FormatterNode::Specifier(sp) => { + let arg = args.next().ok_or(ExprError::TooFewArguments)?; + match sp.ty { + SpecifierType::SimpleString => { + if let Some(scalar) = arg { + scalar.write(writer).unwrap(); + } + } + SpecifierType::SqlIdentifier => match arg { + Some(ScalarRefImpl::Utf8(arg)) => quote_ident(arg, writer), + _ => { + return Err(ExprError::UnsupportedFunction( + "unsupported data for specifier type 'I'".to_string(), + )) + } + }, + SpecifierType::SqlLiteral => { + return Err(ExprError::UnsupportedFunction( + "unsupported specifier type 'L'".to_string(), + )) + } + } + } + } + } + Ok(()) +} /// The type of format conversion to use to produce the format specifier's output. #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -31,7 +76,7 @@ pub enum SpecifierType { impl TryFrom for SpecifierType { type Error = (); - fn try_from(c: char) -> Result { + fn try_from(c: char) -> std::result::Result { match c { 's' => Ok(SpecifierType::SimpleString), 'I' => Ok(SpecifierType::SqlIdentifier), @@ -42,34 +87,36 @@ impl TryFrom for SpecifierType { } #[derive(Debug)] -pub struct Specifier { +struct Specifier { // TODO: support position, flags and width. 
- pub ty: SpecifierType, + ty: SpecifierType, } #[derive(Debug)] -pub enum FormatterNode { +enum FormatterNode { Specifier(Specifier), Literal(String), } #[derive(Debug)] -pub struct Formatter { +struct Formatter { nodes: Vec, } -#[derive(Debug, Error)] -pub enum ParseFormatError { +#[derive(Debug, thiserror::Error)] +enum ParseFormatError { #[error("unrecognized format() type specifier \"{0}\"")] UnrecognizedSpecifierType(char), #[error("unterminated format() type specifier")] UnterminatedSpecifier, } -impl Formatter { +impl FromStr for Formatter { + type Err = ParseFormatError; + /// Parse the format string into a high-efficient representation. /// - pub fn parse(format: &str) -> Result { + fn from_str(format: &str) -> std::result::Result { // 8 is a good magic number here, it can cover an input like 'Testing %s, %s, %s, %%'. let mut nodes = Vec::with_capacity(8); let mut after_percent = false; @@ -106,8 +153,37 @@ impl Formatter { Ok(Formatter { nodes }) } +} - pub fn nodes(&self) -> &[FormatterNode] { - &self.nodes +#[cfg(test)] +mod tests { + use risingwave_common::array::DataChunk; + use risingwave_common::row::Row; + use risingwave_common::test_prelude::DataChunkTestExt; + use risingwave_common::types::ToOwnedDatum; + use risingwave_common::util::iter_util::ZipEqDebug; + use risingwave_expr::expr::build_from_pretty; + + #[tokio::test] + async fn test_format() { + let format = build_from_pretty("(format:varchar $0:varchar $1:varchar $2:varchar)"); + let (input, expected) = DataChunk::from_pretty( + "T T T T + Hello%s World . HelloWorld + %s%s Hello World HelloWorld + %I && . \"&&\" + . 
a b .", + ) + .split_column_at(3); + + // test eval + let output = format.eval(&input).await.unwrap(); + assert_eq!(&output, expected.column_at(0)); + + // test eval_row + for (row, expected) in input.rows().zip_eq_debug(expected.rows()) { + let result = format.eval_row(&row.to_owned_row()).await.unwrap(); + assert_eq!(result, expected.datum_at(0).to_owned_datum()); + } } } diff --git a/src/expr/impl/src/scalar/format_type.rs b/src/expr/impl/src/scalar/format_type.rs new file mode 100644 index 0000000000000..f2a906df7c3b7 --- /dev/null +++ b/src/expr/impl/src/scalar/format_type.rs @@ -0,0 +1,50 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_common::types::DataType; +use risingwave_expr::function; + +#[function("format_type(int4, int4) -> varchar")] +pub fn format_type(oid: Option, _typemod: Option) -> Option> { + // since we don't support type modifier, ignore it. + oid.map(|i| { + DataType::from_oid(i) + .map(|dt| format!("{}", dt).into_boxed_str()) + .unwrap_or("???".into()) + }) +} + +#[cfg(test)] +mod tests { + use risingwave_common::array::DataChunk; + use risingwave_common::test_prelude::DataChunkTestExt; + use risingwave_expr::expr::build_from_pretty; + + #[tokio::test] + async fn test_format_type() { + let (input, target) = DataChunk::from_pretty( + " + i i T + 16 0 boolean + 21 . smallint + 9527 0 ??? + . 0 . 
+ ", + ) + .split_column_at(2); + let expr = build_from_pretty("(format_type:varchar $0:int4 $1:int4)"); + let result = expr.eval(&input).await.unwrap(); + assert_eq!(&result, target.column_at(0)); + } +} diff --git a/src/expr/src/vector_op/int256.rs b/src/expr/impl/src/scalar/int256.rs similarity index 94% rename from src/expr/src/vector_op/int256.rs rename to src/expr/impl/src/scalar/int256.rs index 378bb5dd58a1f..d8735e12d9b6d 100644 --- a/src/expr/src/vector_op/int256.rs +++ b/src/expr/impl/src/scalar/int256.rs @@ -13,10 +13,8 @@ // limitations under the License. use risingwave_common::types::Int256; -use risingwave_expr_macro::function; - -use crate::ExprError::Parse; -use crate::Result; +use risingwave_expr::ExprError::Parse; +use risingwave_expr::{function, Result}; const MAX_AVAILABLE_HEX_STR_LEN: usize = 66; @@ -52,9 +50,9 @@ pub fn hex_to_int256(s: &str) -> Result { #[cfg(test)] mod tests { use risingwave_common::types::Int256; + use risingwave_expr::ExprError::Parse; - use crate::vector_op::int256::hex_to_int256; - use crate::ExprError::Parse; + use crate::scalar::int256::hex_to_int256; #[test] fn test_hex_to_int256() { diff --git a/src/expr/impl/src/scalar/jsonb_access.rs b/src/expr/impl/src/scalar/jsonb_access.rs new file mode 100644 index 0000000000000..8115c1d7214ab --- /dev/null +++ b/src/expr/impl/src/scalar/jsonb_access.rs @@ -0,0 +1,60 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::Write; + +use risingwave_common::types::JsonbRef; +use risingwave_expr::function; + +#[function("jsonb_access_inner(jsonb, varchar) -> jsonb")] +pub fn jsonb_object_field<'a>(v: JsonbRef<'a>, p: &str) -> Option> { + v.access_object_field(p) +} + +#[function("jsonb_access_inner(jsonb, int4) -> jsonb")] +pub fn jsonb_array_element(v: JsonbRef<'_>, p: i32) -> Option> { + let idx = if p < 0 { + let Ok(len) = v.array_len() else { + return None; + }; + if ((-p) as usize) > len { + return None; + } else { + len - ((-p) as usize) + } + } else { + p as usize + }; + v.access_array_element(idx) +} + +#[function("jsonb_access_str(jsonb, varchar) -> varchar")] +pub fn jsonb_object_field_str(v: JsonbRef<'_>, p: &str, writer: &mut impl Write) -> Option<()> { + let jsonb = jsonb_object_field(v, p)?; + if jsonb.is_jsonb_null() { + return None; + } + jsonb.force_str(writer).unwrap(); + Some(()) +} + +#[function("jsonb_access_str(jsonb, int4) -> varchar")] +pub fn jsonb_array_element_str(v: JsonbRef<'_>, p: i32, writer: &mut impl Write) -> Option<()> { + let jsonb = jsonb_array_element(v, p)?; + if jsonb.is_jsonb_null() { + return None; + } + jsonb.force_str(writer).unwrap(); + Some(()) +} diff --git a/src/expr/impl/src/scalar/jsonb_concat.rs b/src/expr/impl/src/scalar/jsonb_concat.rs new file mode 100644 index 0000000000000..6277db8d5b981 --- /dev/null +++ b/src/expr/impl/src/scalar/jsonb_concat.rs @@ -0,0 +1,101 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_common::types::{JsonbRef, JsonbVal}; +use risingwave_expr::function; +use serde_json::{json, Value}; + +/// Concatenates the two jsonbs. +/// +/// Examples: +/// +/// ```slt +/// # concat +/// query T +/// SELECT '[1,2]'::jsonb || '[3,4]'::jsonb; +/// ---- +/// [1, 2, 3, 4] +/// +/// query T +/// SELECT '{"a": 1}'::jsonb || '{"b": 2}'::jsonb; +/// ---- +/// {"a": 1, "b": 2} +/// +/// query T +/// SELECT '[1,2]'::jsonb || '{"a": 1}'::jsonb; +/// ---- +/// [1, 2, {"a": 1}] +/// +/// query T +/// SELECT '1'::jsonb || '2'::jsonb; +/// ---- +/// [1, 2] +/// +/// query T +/// SELECT '[1,2]'::jsonb || 'null'::jsonb; +/// ---- +/// [1, 2, null] +/// +/// query T +/// SELECT 'null'::jsonb || '[1,2]'::jsonb; +/// ---- +/// [null, 1, 2] +/// +/// query T +/// SELECT 'null'::jsonb || '1'::jsonb; +/// ---- +/// [null, 1] +/// ``` +#[function("jsonb_cat(jsonb, jsonb) -> jsonb")] +pub fn jsonb_cat(left: JsonbRef<'_>, right: JsonbRef<'_>) -> JsonbVal { + let left_val = left.value().clone(); + let right_val = right.value().clone(); + match (left_val, right_val) { + // left and right are object based. + // This would have left:{'a':1}, right:{'b':2} -> {'a':1,'b':2} + (Value::Object(mut left_map), Value::Object(right_map)) => { + left_map.extend(right_map); + JsonbVal::from(Value::Object(left_map)) + } + + // left and right are array-based. + // This would merge both arrays into one array. + // This would have left:[1,2], right:[3,4] -> [1,2,3,4] + (Value::Array(mut left_arr), Value::Array(right_arr)) => { + left_arr.extend(right_arr); + JsonbVal::from(Value::Array(left_arr)) + } + + // One operand is an array, and the other is a single element. 
+ // This would insert the non-array value as another element into the array + // Eg left:[1,2] right: {'a':1} -> [1,2,{'a':1}] + (Value::Array(mut left_arr), single_val) => { + left_arr.push(single_val); + JsonbVal::from(Value::Array(left_arr)) + } + + // One operand is an array, and the other is a single element. + // This would insert the non-array value as another element into the array + // Eg left:{'a':1} right:[1,2] -> [{'a':1},1,2] + (single_val, Value::Array(mut right_arr)) => { + right_arr.insert(0, single_val); + JsonbVal::from(Value::Array(right_arr)) + } + + // Both are non-array inputs. + // Both elements would be placed together in an array + // Eg left:1 right: 2 -> [1,2] + (left, right) => JsonbVal::from(json!([left, right])), + } +} diff --git a/src/expr/src/vector_op/jsonb_info.rs b/src/expr/impl/src/scalar/jsonb_info.rs similarity index 90% rename from src/expr/src/vector_op/jsonb_info.rs rename to src/expr/impl/src/scalar/jsonb_info.rs index 5523e120d5fb1..28acf7762a9f8 100644 --- a/src/expr/src/vector_op/jsonb_info.rs +++ b/src/expr/impl/src/scalar/jsonb_info.rs @@ -15,16 +15,14 @@ use std::fmt::Write; use risingwave_common::types::JsonbRef; -use risingwave_expr_macro::function; - -use crate::{ExprError, Result}; +use risingwave_expr::{function, ExprError, Result}; #[function("jsonb_typeof(jsonb) -> varchar")] -pub fn jsonb_typeof(v: JsonbRef<'_>, writer: &mut dyn Write) { +pub fn jsonb_typeof(v: JsonbRef<'_>, writer: &mut impl Write) { writer.write_str(v.type_name()).unwrap() } -#[function("jsonb_array_length(jsonb) -> int32")] +#[function("jsonb_array_length(jsonb) -> int4")] pub fn jsonb_array_length(v: JsonbRef<'_>) -> Result { v.array_len() .map(|n| n as i32) diff --git a/src/expr/src/vector_op/length.rs b/src/expr/impl/src/scalar/length.rs similarity index 82% rename from src/expr/src/vector_op/length.rs rename to src/expr/impl/src/scalar/length.rs index f102d2b4e2df2..d447941e30209 100644 --- a/src/expr/src/vector_op/length.rs +++ 
b/src/expr/impl/src/scalar/length.rs @@ -12,23 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_expr_macro::function; +use risingwave_expr::function; -#[function("length(varchar) -> int32")] -#[function("char_length(varchar) -> int32")] +#[function("length(varchar) -> int4")] +#[function("char_length(varchar) -> int4")] pub fn char_length(s: &str) -> i32 { s.chars().count() as i32 } -#[function("octet_length(varchar) -> int32")] -#[function("length(bytea) -> int32")] -#[function("octet_length(bytea) -> int32")] +#[function("octet_length(varchar) -> int4")] +#[function("length(bytea) -> int4")] +#[function("octet_length(bytea) -> int4")] pub fn octet_length(s: impl AsRef<[u8]>) -> i32 { s.as_ref().len() as i32 } -#[function("bit_length(varchar) -> int32")] -#[function("bit_length(bytea) -> int32")] +#[function("bit_length(varchar) -> int4")] +#[function("bit_length(bytea) -> int4")] pub fn bit_length(s: impl AsRef<[u8]>) -> i32 { octet_length(s) * 8 } diff --git a/src/expr/src/vector_op/lower.rs b/src/expr/impl/src/scalar/lower.rs similarity index 93% rename from src/expr/src/vector_op/lower.rs rename to src/expr/impl/src/scalar/lower.rs index c76de5b129c27..d24ee16d9f8f9 100644 --- a/src/expr/src/vector_op/lower.rs +++ b/src/expr/impl/src/scalar/lower.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("lower(varchar) -> varchar")] -pub fn lower(s: &str, writer: &mut dyn Write) { +pub fn lower(s: &str, writer: &mut impl Write) { for c in s.chars() { writer.write_char(c.to_ascii_lowercase()).unwrap(); } diff --git a/src/expr/src/vector_op/md5.rs b/src/expr/impl/src/scalar/md5.rs similarity index 93% rename from src/expr/src/vector_op/md5.rs rename to src/expr/impl/src/scalar/md5.rs index 177b43ae4196b..a1a3c30501b81 100644 --- a/src/expr/src/vector_op/md5.rs +++ b/src/expr/impl/src/scalar/md5.rs @@ -14,15 +14,15 
@@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("md5(varchar) -> varchar")] -pub fn md5(s: &str, writer: &mut dyn Write) { +pub fn md5(s: &str, writer: &mut impl Write) { write!(writer, "{:x}", ::md5::compute(s)).unwrap(); } #[function("md5(bytea) -> varchar")] -pub fn md5_from_bytea(s: &[u8], writer: &mut dyn Write) { +pub fn md5_from_bytea(s: &[u8], writer: &mut impl Write) { writer .write_str(&::hex::encode(::md5::compute(s).0)) .unwrap(); diff --git a/src/expr/impl/src/scalar/mod.rs b/src/expr/impl/src/scalar/mod.rs new file mode 100644 index 0000000000000..dd88a374ba966 --- /dev/null +++ b/src/expr/impl/src/scalar/mod.rs @@ -0,0 +1,71 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod arithmetic_op; +mod array; +mod array_access; +mod array_concat; +mod array_distinct; +mod array_length; +mod array_min_max; +mod array_positions; +mod array_range_access; +mod array_remove; +mod array_replace; +mod array_sort; +mod array_sum; +mod array_to_string; +mod ascii; +mod bitwise_op; +mod cardinality; +mod cast; +mod cmp; +mod concat_op; +mod concat_ws; +mod conjunction; +mod date_trunc; +mod delay; +mod encdec; +mod exp; +mod extract; +mod format; +mod format_type; +mod int256; +mod jsonb_access; +mod jsonb_concat; +mod jsonb_info; +mod length; +mod lower; +mod md5; +mod overlay; +mod position; +mod proctime; +pub mod regexp; +mod repeat; +mod replace; +mod round; +mod sha; +mod split_part; +mod string; +mod string_to_array; +mod substr; +mod timestamptz; +mod to_char; +mod to_timestamp; +mod translate; +mod trigonometric; +mod trim; +mod trim_array; +mod tumble; +mod upper; diff --git a/src/expr/src/vector_op/overlay.rs b/src/expr/impl/src/scalar/overlay.rs similarity index 94% rename from src/expr/src/vector_op/overlay.rs rename to src/expr/impl/src/scalar/overlay.rs index 79501a52f2d0e..509740892ffd0 100644 --- a/src/expr/src/vector_op/overlay.rs +++ b/src/expr/impl/src/scalar/overlay.rs @@ -14,9 +14,7 @@ use std::fmt::Write; -use risingwave_expr_macro::function; - -use crate::{ExprError, Result}; +use risingwave_expr::{function, ExprError, Result}; /// Replaces a substring of the given string with a new substring. 
/// @@ -26,8 +24,8 @@ use crate::{ExprError, Result}; /// ---- /// αβ💯δεζ /// ``` -#[function("overlay(varchar, varchar, int32) -> varchar")] -pub fn overlay(s: &str, new_sub_str: &str, start: i32, writer: &mut dyn Write) -> Result<()> { +#[function("overlay(varchar, varchar, int4) -> varchar")] +pub fn overlay(s: &str, new_sub_str: &str, start: i32, writer: &mut impl Write) -> Result<()> { let sub_len = new_sub_str .chars() .count() @@ -82,13 +80,13 @@ pub fn overlay(s: &str, new_sub_str: &str, start: i32, writer: &mut dyn Write) - /// ---- /// αβγ①②③αβγδεζ /// ``` -#[function("overlay(varchar, varchar, int32, int32) -> varchar")] +#[function("overlay(varchar, varchar, int4, int4) -> varchar")] pub fn overlay_for( s: &str, new_sub_str: &str, start: i32, count: i32, - writer: &mut dyn Write, + writer: &mut impl Write, ) -> Result<()> { if start <= 0 { return Err(ExprError::InvalidParam { diff --git a/src/expr/src/vector_op/position.rs b/src/expr/impl/src/scalar/position.rs similarity index 91% rename from src/expr/src/vector_op/position.rs rename to src/expr/impl/src/scalar/position.rs index 0700c4e847e97..f495a64def783 100644 --- a/src/expr/src/vector_op/position.rs +++ b/src/expr/impl/src/scalar/position.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_expr_macro::function; +use risingwave_expr::function; /// Returns the index of the first occurrence of the specified substring in the input string, /// or zero if the substring is not present. 
@@ -45,8 +45,8 @@ use risingwave_expr_macro::function; /// ---- /// 4 /// ``` -#[function("strpos(varchar, varchar) -> int32", deprecated)] -#[function("position(varchar, varchar) -> int32")] +#[function("strpos(varchar, varchar) -> int4", deprecated)] +#[function("position(varchar, varchar) -> int4")] pub fn position(str: &str, sub_str: &str) -> i32 { match str.find(sub_str) { Some(byte_idx) => (str[..byte_idx].chars().count() + 1) as i32, diff --git a/src/expr/impl/src/scalar/proctime.rs b/src/expr/impl/src/scalar/proctime.rs new file mode 100644 index 0000000000000..659a64f4e0c7b --- /dev/null +++ b/src/expr/impl/src/scalar/proctime.rs @@ -0,0 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_common::types::Timestamptz; +use risingwave_common::util::epoch; +use risingwave_expr::{function, ExprError, Result}; + +/// Get the processing time in Timestamptz scalar from the task-local epoch. 
+#[function("proctime() -> timestamptz", volatile)] +fn proctime() -> Result { + let epoch = epoch::task_local::curr_epoch().ok_or(ExprError::Context("EPOCH"))?; + Ok(epoch.as_timestamptz()) +} + +#[cfg(test)] +mod tests { + use risingwave_common::types::Timestamptz; + use risingwave_common::util::epoch::{Epoch, EpochPair}; + + use super::*; + + #[tokio::test] + async fn test_proctime() { + let curr_epoch = Epoch::now(); + let epoch = EpochPair { + curr: curr_epoch.0, + prev: 0, + }; + + let proctime = epoch::task_local::scope(epoch, async { proctime().unwrap() }).await; + + assert_eq!( + proctime, + Timestamptz::from_millis(curr_epoch.as_unix_millis() as i64).unwrap() + ); + } +} diff --git a/src/expr/impl/src/scalar/regexp.rs b/src/expr/impl/src/scalar/regexp.rs new file mode 100644 index 0000000000000..d919334fe2c98 --- /dev/null +++ b/src/expr/impl/src/scalar/regexp.rs @@ -0,0 +1,523 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Regular expression functions. 
+ +use std::str::FromStr; + +use fancy_regex::{Regex, RegexBuilder}; +use risingwave_common::array::ListValue; +use risingwave_common::types::ScalarImpl; +use risingwave_expr::{bail, function, ExprError, Result}; + +#[derive(Debug)] +pub struct RegexpContext { + pub regex: Regex, + pub global: bool, + pub replacement: String, +} + +impl RegexpContext { + fn new(pattern: &str, flags: &str, replacement: &str) -> Result { + let options = RegexpOptions::from_str(flags)?; + + let origin = if options.case_insensitive { + format!("(?i:{})", pattern) + } else { + pattern.to_string() + }; + + Ok(Self { + regex: RegexBuilder::new(&origin) + .build() + .map_err(|e| ExprError::Parse(e.to_string().into()))?, + global: options.global, + replacement: make_replacement(replacement), + }) + } + + pub fn from_pattern(pattern: &str) -> Result { + Self::new(pattern, "", "") + } + + pub fn from_pattern_flags(pattern: &str, flags: &str) -> Result { + Self::new(pattern, flags, "") + } + + pub fn from_pattern_flags_for_count(pattern: &str, flags: &str) -> Result { + if flags.contains('g') { + bail!("regexp_count() does not support the global option"); + } + Self::new(pattern, flags, "") + } + + pub fn from_pattern_replacement(pattern: &str, replacement: &str) -> Result { + Self::new(pattern, "", replacement) + } + + pub fn from_pattern_replacement_flags( + pattern: &str, + replacement: &str, + flags: &str, + ) -> Result { + Self::new(pattern, flags, replacement) + } +} + +/// Construct the regex used to match and replace `\n` expression. +/// +/// +/// ```text +/// \& -> ${0} +/// \1 -> ${1} +/// ... 
+/// \9 -> ${9} +/// ``` +fn make_replacement(s: &str) -> String { + use std::fmt::Write; + let mut ret = String::with_capacity(s.len()); + let mut chars = s.chars(); + while let Some(c) = chars.next() { + if c != '\\' { + ret.push(c); + continue; + } + match chars.next() { + Some('&') => ret.push_str("${0}"), + Some(c @ '1'..='9') => write!(&mut ret, "${{{c}}}").unwrap(), + Some(c) => write!(ret, "\\{c}").unwrap(), + None => ret.push('\\'), + } + } + ret +} + +/// +#[derive(Default, Debug)] +struct RegexpOptions { + /// `c` and `i` + case_insensitive: bool, + /// `g` + global: bool, +} + +impl FromStr for RegexpOptions { + type Err = ExprError; + + fn from_str(s: &str) -> Result { + let mut opts = Self::default(); + for c in s.chars() { + match c { + // Case sensitive matching here + 'c' => opts.case_insensitive = false, + // Case insensitive matching here + 'i' => opts.case_insensitive = true, + // Global matching here + 'g' => opts.global = true, + _ => { + bail!("invalid regular expression option: \"{c}\""); + } + } + } + Ok(opts) + } +} + +#[function( + // regexp_match(source, pattern) + "regexp_match(varchar, varchar) -> varchar[]", + prebuild = "RegexpContext::from_pattern($1)?" +)] +#[function( + // regexp_match(source, pattern, flags) + "regexp_match(varchar, varchar, varchar) -> varchar[]", + prebuild = "RegexpContext::from_pattern_flags($1, $2)?" +)] +fn regexp_match(text: &str, regex: &RegexpContext) -> Option { + // If there are multiple captures, then the first one is the whole match, and should be + // ignored in PostgreSQL's behavior. + let skip_first = regex.regex.captures_len() > 1; + let capture = regex.regex.captures(text).unwrap()?; + let list = capture + .iter() + .skip(if skip_first { 1 } else { 0 }) + .map(|mat| mat.map(|m| m.as_str().into())) + .collect(); + Some(ListValue::new(list)) +} + +#[function( + // regexp_count(source, pattern) + "regexp_count(varchar, varchar) -> int4", + prebuild = "RegexpContext::from_pattern($1)?" 
+)] +fn regexp_count_start0(text: &str, regex: &RegexpContext) -> Result { + regexp_count(text, 1, regex) +} + +#[function( + // regexp_count(source, pattern, start) + "regexp_count(varchar, varchar, int4) -> int4", + prebuild = "RegexpContext::from_pattern($1)?" +)] +#[function( + // regexp_count(source, pattern, start, flags) + "regexp_count(varchar, varchar, int4, varchar) -> int4", + prebuild = "RegexpContext::from_pattern_flags_for_count($1, $3)?" +)] +fn regexp_count(text: &str, start: i32, regex: &RegexpContext) -> Result { + // First get the start position to count for + let start = match start { + ..=0 => { + return Err(ExprError::InvalidParam { + name: "start", + reason: start.to_string().into(), + }) + } + _ => start as usize - 1, + }; + + // Find the start byte index considering the unicode + let mut start = match text.char_indices().nth(start) { + Some((idx, _)) => idx, + // The `start` is out of bound + None => return Ok(0), + }; + + let mut count = 0; + while let Ok(Some(captures)) = regex.regex.captures(&text[start..]) { + count += 1; + start += captures.get(0).unwrap().end(); + } + Ok(count) +} + +#[function( + // regexp_replace(source, pattern, replacement) + "regexp_replace(varchar, varchar, varchar) -> varchar", + prebuild = "RegexpContext::from_pattern_replacement($1, $2)?" +)] +#[function( + // regexp_replace(source, pattern, replacement, flags) + "regexp_replace(varchar, varchar, varchar, varchar) -> varchar", + prebuild = "RegexpContext::from_pattern_replacement_flags($1, $2, $3)?" +)] +fn regexp_replace0(text: &str, ctx: &RegexpContext) -> Result> { + regexp_replace(text, 1, None, ctx) +} + +#[function( + // regexp_replace(source, pattern, replacement, start) + "regexp_replace(varchar, varchar, varchar, int4) -> varchar", + prebuild = "RegexpContext::from_pattern_replacement($1, $2)?" 
+)] +fn regexp_replace_with_start(text: &str, start: i32, ctx: &RegexpContext) -> Result> { + regexp_replace(text, start, None, ctx) +} + +#[function( + // regexp_replace(source, pattern, replacement, start, N) + "regexp_replace(varchar, varchar, varchar, int4, int4) -> varchar", + prebuild = "RegexpContext::from_pattern_replacement($1, $2)?" +)] +fn regexp_replace_with_start_n( + text: &str, + start: i32, + n: i32, + ctx: &RegexpContext, +) -> Result> { + regexp_replace(text, start, Some(n), ctx) +} + +#[function( + // regexp_replace(source, pattern, replacement, start, N, flags) + "regexp_replace(varchar, varchar, varchar, int4, int4, varchar) -> varchar", + prebuild = "RegexpContext::from_pattern_replacement_flags($1, $2, $5)?" +)] +fn regexp_replace_with_start_n_flags( + text: &str, + start: i32, + n: i32, + ctx: &RegexpContext, +) -> Result> { + regexp_replace(text, start, Some(n), ctx) +} + +// regexp_replace(source, pattern, replacement [, start [, N ]] [, flags ]) +fn regexp_replace( + text: &str, + start: i32, + n: Option, // `None` if not specified + ctx: &RegexpContext, +) -> Result> { + // The start position to begin the search + let start = match start { + ..=0 => { + return Err(ExprError::InvalidParam { + name: "start", + reason: start.to_string().into(), + }) + } + _ => start as usize - 1, + }; + + // This is because the source text may contain unicode + let start = match text.char_indices().nth(start) { + Some((idx, _)) => idx, + // With no match + None => return Ok(text.into()), + }; + + if n.is_none() && ctx.global || n == Some(0) { + // -------------------------------------------------------------- + // `-g` enabled (& `N` is not specified) or `N` is `0` | + // We need to replace all the occurrence of the matched pattern | + // -------------------------------------------------------------- + + // See if there is capture group or not + if ctx.regex.captures_len() <= 1 { + // There is no capture groups in the regex + // Just replace all matched 
patterns after `start` + Ok(format!( + "{}{}", + &text[..start], + ctx.regex.replace_all(&text[start..], &ctx.replacement) + ) + .into()) + } else { + // The position to start searching for replacement + let mut search_start = start; + + // Construct the return string + let mut ret = text[..search_start].to_string(); + + // Begin the actual replace logic + while let Ok(Some(capture)) = ctx.regex.captures(&text[search_start..]) { + let match_start = capture.get(0).unwrap().start(); + let match_end = capture.get(0).unwrap().end(); + + if match_start == match_end { + // If this is an empty match + search_start += 1; + continue; + } + + // Append the portion of the text from `search_start` to `match_start` + ret.push_str(&text[search_start..search_start + match_start]); + + // Start to replacing + // Note that the result will be written directly to `ret` buffer + capture.expand(&ctx.replacement, &mut ret); + + // Update the `search_start` + search_start += match_end; + } + + // Push the rest of the text to return string + ret.push_str(&text[search_start..]); + + Ok(ret.into()) + } + } else { + // ------------------------------------------------- + // Only replace the first matched pattern | + // Or the N-th matched pattern if `N` is specified | + // ------------------------------------------------- + + // Construct the return string + let mut ret = if start > 1 { + text[..start].to_string() + } else { + "".to_string() + }; + + // See if there is capture group or not + if ctx.regex.captures_len() <= 1 { + // There is no capture groups in the regex + if let Some(n) = n { + // Replace only the N-th match + let mut count = 1; + // The absolute index for the start of searching + let mut search_start = start; + while let Ok(Some(capture)) = ctx.regex.captures(&text[search_start..]) { + // Get the current start & end index + let match_start = capture.get(0).unwrap().start(); + let match_end = capture.get(0).unwrap().end(); + + if count == n { + // We've reached the pattern to 
replace + // Let's construct the return string + ret = format!( + "{}{}{}", + &text[..search_start + match_start], + &ctx.replacement, + &text[search_start + match_end..] + ); + break; + } + + // Update the counter + count += 1; + + // Update `start` + search_start += match_end; + } + } else { + // `N` is not specified + ret.push_str(&ctx.regex.replacen(&text[start..], 1, &ctx.replacement)); + } + } else { + // There are capture groups in the regex + // Reset return string at the beginning + ret = "".to_string(); + if let Some(n) = n { + // Replace only the N-th match + let mut count = 1; + while let Ok(Some(capture)) = ctx.regex.captures(&text[start..]) { + if count == n { + // We've reached the pattern to replace + let match_start = capture.get(0).unwrap().start(); + let match_end = capture.get(0).unwrap().end(); + + // Get the replaced string and expand it + capture.expand(&ctx.replacement, &mut ret); + + // Construct the return string + ret = format!( + "{}{}{}", + &text[..start + match_start], + ret, + &text[start + match_end..] + ); + } + + // Update the counter + count += 1; + } + + // If there is no match, just return the original string + if ret.is_empty() { + ret = text.into(); + } + } else { + // `N` is not specified + if let Ok(None) = ctx.regex.captures(&text[start..]) { + // No match + return Ok(text.into()); + } + + // Otherwise replace the source text + if let Ok(Some(capture)) = ctx.regex.captures(&text[start..]) { + let match_start = capture.get(0).unwrap().start(); + let match_end = capture.get(0).unwrap().end(); + + // Get the replaced string and expand it + capture.expand(&ctx.replacement, &mut ret); + + // Construct the return string + ret = format!( + "{}{}{}", + &text[..start + match_start], + ret, + &text[start + match_end..] + ); + } + } + } + + Ok(ret.into()) + } +} + +#[function( + // regexp_split_to_array(source, pattern) + "regexp_split_to_array(varchar, varchar) -> varchar[]", + prebuild = "RegexpContext::from_pattern($1)?" 
+)] +#[function( + // regexp_split_to_array(source, pattern, flags) + "regexp_split_to_array(varchar, varchar, varchar) -> varchar[]", + prebuild = "RegexpContext::from_pattern_flags($1, $2)?" +)] +fn regexp_split_to_array(text: &str, regex: &RegexpContext) -> Option { + let n = text.len(); + let mut start = 0; + let mut list: Vec> = Vec::new(); + let mut empty_flag = false; + + loop { + if start >= n { + // Prevent overflow + break; + } + + let capture = regex.regex.captures(&text[start..]).unwrap(); + + if capture.is_none() { + break; + } + + let whole_match = capture.unwrap().get(0); + debug_assert!(whole_match.is_some(), "Expected `whole_match` to be valid"); + + let begin = whole_match.unwrap().start() + start; + let end = whole_match.unwrap().end() + start; + + if begin == end { + // Empty match (i.e., `\s*`) + empty_flag = true; + + if begin == text.len() { + // We do not need to push extra stuff to the result list + start = begin; + break; + } + list.push(Some(text[start..begin + 1].into())); + start = end + 1; + continue; + } + + if start == begin { + // The before match is possibly empty + if !empty_flag { + // We'll push an empty string to conform with postgres + // If there does not exists a empty match before + list.push(Some("".to_string().into())); + } + start = end; + continue; + } + + if begin != 0 { + // Normal case + list.push(Some(text[start..begin].into())); + } + + // We should update the `start` no matter `begin` is zero or not + start = end; + } + + if start < n { + // Push the extra text to the list + // Note that this will implicitly push the entire text to the list + // If there is no match, which is the expected behavior + list.push(Some(text[start..].into())); + } + + if start == n && !empty_flag { + list.push(Some("".to_string().into())); + } + + Some(ListValue::new(list)) +} diff --git a/src/expr/src/vector_op/repeat.rs b/src/expr/impl/src/scalar/repeat.rs similarity index 88% rename from src/expr/src/vector_op/repeat.rs rename to 
src/expr/impl/src/scalar/repeat.rs index b46076b8eeefd..9791bf1d8124a 100644 --- a/src/expr/src/vector_op/repeat.rs +++ b/src/expr/impl/src/scalar/repeat.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; -#[function("repeat(varchar, int32) -> varchar")] -pub fn repeat(s: &str, count: i32, writer: &mut dyn Write) { +#[function("repeat(varchar, int4) -> varchar")] +pub fn repeat(s: &str, count: i32, writer: &mut impl Write) { for _ in 0..count { writer.write_str(s).unwrap(); } diff --git a/src/expr/src/vector_op/replace.rs b/src/expr/impl/src/scalar/replace.rs similarity index 97% rename from src/expr/src/vector_op/replace.rs rename to src/expr/impl/src/scalar/replace.rs index 02eeefdc8490e..d3493ae5353fe 100644 --- a/src/expr/src/vector_op/replace.rs +++ b/src/expr/impl/src/scalar/replace.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("replace(varchar, varchar, varchar) -> varchar")] -pub fn replace(s: &str, from_str: &str, to_str: &str, writer: &mut dyn Write) { +pub fn replace(s: &str, from_str: &str, to_str: &str, writer: &mut impl Write) { if from_str.is_empty() { writer.write_str(s).unwrap(); return; diff --git a/src/expr/src/vector_op/round.rs b/src/expr/impl/src/scalar/round.rs similarity index 93% rename from src/expr/src/vector_op/round.rs rename to src/expr/impl/src/scalar/round.rs index 5ad8c836e5696..fb010933e1e7b 100644 --- a/src/expr/src/vector_op/round.rs +++ b/src/expr/impl/src/scalar/round.rs @@ -13,11 +13,9 @@ // limitations under the License. 
use risingwave_common::types::{Decimal, F64}; -use risingwave_expr_macro::function; +use risingwave_expr::{function, ExprError, Result}; -use crate::{ExprError, Result}; - -#[function("round_digit(decimal, int32) -> decimal")] +#[function("round_digit(decimal, int4) -> decimal")] pub fn round_digits(input: Decimal, digits: i32) -> Result { if digits < 0 { input @@ -29,7 +27,7 @@ pub fn round_digits(input: Decimal, digits: i32) -> Result { } } -#[function("ceil(float64) -> float64")] +#[function("ceil(float8) -> float8")] pub fn ceil_f64(input: F64) -> F64 { f64::ceil(input.0).into() } @@ -39,7 +37,7 @@ pub fn ceil_decimal(input: Decimal) -> Decimal { input.ceil() } -#[function("floor(float64) -> float64")] +#[function("floor(float8) -> float8")] pub fn floor_f64(input: F64) -> F64 { f64::floor(input.0).into() } @@ -49,7 +47,7 @@ pub fn floor_decimal(input: Decimal) -> Decimal { input.floor() } -#[function("trunc(float64) -> float64")] +#[function("trunc(float8) -> float8")] pub fn trunc_f64(input: F64) -> F64 { f64::trunc(input.0).into() } @@ -60,7 +58,7 @@ pub fn trunc_decimal(input: Decimal) -> Decimal { } // Ties are broken by rounding away from zero -#[function("round(float64) -> float64")] +#[function("round(float8) -> float8")] pub fn round_f64(input: F64) -> F64 { f64::round_ties_even(input.0).into() } @@ -78,7 +76,7 @@ mod tests { use risingwave_common::types::{Decimal, F64}; use super::ceil_f64; - use crate::vector_op::round::*; + use crate::scalar::round::*; fn do_test(input: &str, digits: i32, expected_output: Option<&str>) { let v = Decimal::from_str(input).unwrap(); diff --git a/src/expr/src/vector_op/sha.rs b/src/expr/impl/src/scalar/sha.rs similarity index 69% rename from src/expr/src/vector_op/sha.rs rename to src/expr/impl/src/scalar/sha.rs index b332e5a1fb7bb..8e5358ae4dccc 100644 --- a/src/expr/src/vector_op/sha.rs +++ b/src/expr/impl/src/scalar/sha.rs @@ -12,35 +12,33 @@ // See the License for the specific language governing permissions and // 
limitations under the License. -use risingwave_expr_macro::function; +use risingwave_expr::function; use sha1::{Digest, Sha1}; use sha2::{Sha224, Sha256, Sha384, Sha512}; -use crate::Result; - #[function("sha1(bytea) -> bytea")] -pub fn sha1(data: &[u8]) -> Result> { - Ok(Sha1::digest(data).to_vec().into()) +pub fn sha1(data: &[u8]) -> impl AsRef<[u8]> { + Sha1::digest(data) } #[function("sha224(bytea) -> bytea")] -pub fn sha224(data: &[u8]) -> Result> { - Ok(Sha224::digest(data).to_vec().into()) +pub fn sha224(data: &[u8]) -> impl AsRef<[u8]> { + Sha224::digest(data) } #[function("sha256(bytea) -> bytea")] -pub fn sha256(data: &[u8]) -> Result> { - Ok(Sha256::digest(data).to_vec().into()) +pub fn sha256(data: &[u8]) -> impl AsRef<[u8]> { + Sha256::digest(data) } #[function("sha384(bytea) -> bytea")] -pub fn sha384(data: &[u8]) -> Result> { - Ok(Sha384::digest(data).to_vec().into()) +pub fn sha384(data: &[u8]) -> impl AsRef<[u8]> { + Sha384::digest(data) } #[function("sha512(bytea) -> bytea")] -pub fn sha512(data: &[u8]) -> Result> { - Ok(Sha512::digest(data).to_vec().into()) +pub fn sha512(data: &[u8]) -> impl AsRef<[u8]> { + Sha512::digest(data) } #[cfg(test)] @@ -54,9 +52,8 @@ mod tests { )]; for (ori, encoded) in cases { - let t = sha1(ori).unwrap(); - let slice: &[u8] = &t; - assert_eq!(slice, encoded); + let t = sha1(ori); + assert_eq!(t.as_ref(), encoded); } } @@ -67,9 +64,8 @@ mod tests { ]; for (ori, encoded) in cases { - let t = sha224(ori).unwrap(); - let slice: &[u8] = &t; - assert_eq!(slice, encoded); + let t = sha224(ori); + assert_eq!(t.as_ref(), encoded); } } @@ -80,9 +76,8 @@ mod tests { ]; for (ori, encoded) in cases { - let t = sha256(ori).unwrap(); - let slice: &[u8] = &t; - assert_eq!(slice, encoded); + let t = sha256(ori); + assert_eq!(t.as_ref(), encoded); } } @@ -93,9 +88,8 @@ mod tests { ]; for (ori, encoded) in cases { - let t = sha384(ori).unwrap(); - let slice: &[u8] = &t; - assert_eq!(slice, encoded); + let t = sha384(ori); + 
assert_eq!(t.as_ref(), encoded); } } @@ -106,9 +100,8 @@ mod tests { ]; for (ori, encoded) in cases { - let t = sha512(ori).unwrap(); - let slice: &[u8] = &t; - assert_eq!(slice, encoded); + let t = sha512(ori); + assert_eq!(t.as_ref(), encoded); } } } diff --git a/src/expr/src/vector_op/split_part.rs b/src/expr/impl/src/scalar/split_part.rs similarity index 96% rename from src/expr/src/vector_op/split_part.rs rename to src/expr/impl/src/scalar/split_part.rs index 72782c65ba1e8..f9d4e976acde2 100644 --- a/src/expr/src/vector_op/split_part.rs +++ b/src/expr/impl/src/scalar/split_part.rs @@ -14,16 +14,14 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::{function, ExprError, Result}; -use crate::{ExprError, Result}; - -#[function("split_part(varchar, varchar, int32) -> varchar")] +#[function("split_part(varchar, varchar, int4) -> varchar")] pub fn split_part( string_expr: &str, delimiter_expr: &str, nth_expr: i32, - writer: &mut dyn Write, + writer: &mut impl Write, ) -> Result<()> { if nth_expr == 0 { return Err(ExprError::InvalidParam { diff --git a/src/expr/src/vector_op/string.rs b/src/expr/impl/src/scalar/string.rs similarity index 91% rename from src/expr/src/vector_op/string.rs rename to src/expr/impl/src/scalar/string.rs index edff1207db89a..e2ed03af0e768 100644 --- a/src/expr/src/vector_op/string.rs +++ b/src/expr/impl/src/scalar/string.rs @@ -18,7 +18,7 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; /// Returns the character with the specified Unicode code point. 
/// @@ -30,8 +30,8 @@ use risingwave_expr_macro::function; /// ---- /// A /// ``` -#[function("chr(int32) -> varchar")] -pub fn chr(code: i32, writer: &mut dyn Write) { +#[function("chr(int4) -> varchar")] +pub fn chr(code: i32, writer: &mut impl Write) { if let Some(c) = std::char::from_u32(code as u32) { write!(writer, "{}", c).unwrap(); } @@ -73,7 +73,7 @@ pub fn starts_with(s: &str, prefix: &str) -> bool { /// The Quick Brown Fox /// ``` #[function("initcap(varchar) -> varchar")] -pub fn initcap(s: &str, writer: &mut dyn Write) { +pub fn initcap(s: &str, writer: &mut impl Write) { let mut capitalize_next = true; for c in s.chars() { if capitalize_next { @@ -104,8 +104,8 @@ pub fn initcap(s: &str, writer: &mut dyn Write) { /// ---- /// abc /// ``` -#[function("lpad(varchar, int32) -> varchar")] -pub fn lpad(s: &str, length: i32, writer: &mut dyn Write) { +#[function("lpad(varchar, int4) -> varchar")] +pub fn lpad(s: &str, length: i32, writer: &mut impl Write) { lpad_fill(s, length, " ", writer); } @@ -125,8 +125,8 @@ pub fn lpad(s: &str, length: i32, writer: &mut dyn Write) { /// ---- /// hi /// ``` -#[function("lpad(varchar, int32, varchar) -> varchar")] -pub fn lpad_fill(s: &str, length: i32, fill: &str, writer: &mut dyn Write) { +#[function("lpad(varchar, int4, varchar) -> varchar")] +pub fn lpad_fill(s: &str, length: i32, fill: &str, writer: &mut impl Write) { let s_len = s.chars().count(); let fill_len = fill.chars().count(); @@ -168,8 +168,8 @@ pub fn lpad_fill(s: &str, length: i32, fill: &str, writer: &mut dyn Write) { /// ---- /// abc /// ``` -#[function("rpad(varchar, int32) -> varchar")] -pub fn rpad(s: &str, length: i32, writer: &mut dyn Write) { +#[function("rpad(varchar, int4) -> varchar")] +pub fn rpad(s: &str, length: i32, writer: &mut impl Write) { rpad_fill(s, length, " ", writer); } @@ -200,8 +200,8 @@ pub fn rpad(s: &str, length: i32, writer: &mut dyn Write) { /// ---- /// hi /// ``` -#[function("rpad(varchar, int32, varchar) -> varchar")] 
-pub fn rpad_fill(s: &str, length: i32, fill: &str, writer: &mut dyn Write) { +#[function("rpad(varchar, int4, varchar) -> varchar")] +pub fn rpad_fill(s: &str, length: i32, fill: &str, writer: &mut impl Write) { let s_len = s.chars().count(); let fill_len = fill.chars().count(); @@ -239,7 +239,7 @@ pub fn rpad_fill(s: &str, length: i32, fill: &str, writer: &mut dyn Write) { /// fedcba /// ``` #[function("reverse(varchar) -> varchar")] -pub fn reverse(s: &str, writer: &mut dyn Write) { +pub fn reverse(s: &str, writer: &mut impl Write) { for c in s.chars().rev() { write!(writer, "{}", c).unwrap(); } @@ -257,7 +257,7 @@ pub fn reverse(s: &str, writer: &mut dyn Write) { /// Karel /// ``` #[function("to_ascii(varchar) -> varchar")] -pub fn to_ascii(s: &str, writer: &mut dyn Write) { +pub fn to_ascii(s: &str, writer: &mut impl Write) { for c in s.chars() { let ascii = match c { 'Á' | 'À' | 'Â' | 'Ã' => 'A', @@ -319,13 +319,13 @@ pub fn to_ascii(s: &str, writer: &mut dyn Write) { /// ---- /// 8000000000000000 /// ``` -#[function("to_hex(int32) -> varchar")] -pub fn to_hex_i32(n: i32, writer: &mut dyn Write) { +#[function("to_hex(int4) -> varchar")] +pub fn to_hex_i32(n: i32, writer: &mut impl Write) { write!(writer, "{:x}", n).unwrap(); } -#[function("to_hex(int64) -> varchar")] -pub fn to_hex_i64(n: i64, writer: &mut dyn Write) { +#[function("to_hex(int8) -> varchar")] +pub fn to_hex_i64(n: i64, writer: &mut impl Write) { write!(writer, "{:x}", n).unwrap(); } @@ -365,7 +365,7 @@ pub fn to_hex_i64(n: i64, writer: &mut dyn Write) { /// select /// ``` #[function("quote_ident(varchar) -> varchar")] -pub fn quote_ident(s: &str, writer: &mut dyn Write) { +pub fn quote_ident(s: &str, writer: &mut impl Write) { let needs_quotes = s.chars().any(|c| !matches!(c, 'a'..='z' | '0'..='9' | '_')); if !needs_quotes { write!(writer, "{}", s).unwrap(); @@ -413,8 +413,8 @@ pub fn quote_ident(s: &str, writer: &mut dyn Write) { /// ---- /// (empty) /// ``` -#[function("left(varchar, int32) 
-> varchar")] -pub fn left(s: &str, n: i32, writer: &mut dyn Write) { +#[function("left(varchar, int4) -> varchar")] +pub fn left(s: &str, n: i32, writer: &mut impl Write) { let n = if n >= 0 { n as usize } else { @@ -458,8 +458,8 @@ pub fn left(s: &str, n: i32, writer: &mut dyn Write) { /// ---- /// (empty) /// ``` -#[function("right(varchar, int32) -> varchar")] -pub fn right(s: &str, n: i32, writer: &mut dyn Write) { +#[function("right(varchar, int4) -> varchar")] +pub fn right(s: &str, n: i32, writer: &mut impl Write) { let skip = if n >= 0 { s.chars().count().saturating_sub(n as usize) } else { diff --git a/src/expr/src/vector_op/string_to_array.rs b/src/expr/impl/src/scalar/string_to_array.rs similarity index 61% rename from src/expr/src/vector_op/string_to_array.rs rename to src/expr/impl/src/scalar/string_to_array.rs index 7e2d36aed5142..a61cbda3ddfbe 100644 --- a/src/expr/src/vector_op/string_to_array.rs +++ b/src/expr/impl/src/scalar/string_to_array.rs @@ -16,7 +16,7 @@ use auto_enums::auto_enum; use itertools::Itertools; use risingwave_common::array::ListValue; use risingwave_common::types::ScalarImpl; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[auto_enum(Iterator)] fn string_to_array_inner<'a>( @@ -35,39 +35,33 @@ fn string_to_array_inner<'a>( } // Use cases shown in `e2e_test/batch/functions/string_to_array.slt.part` -#[function("string_to_array(varchar, varchar) -> list")] +#[function("string_to_array(varchar, varchar) -> varchar[]")] pub fn string_to_array2(s: Option<&str>, sep: Option<&str>) -> Option { - s.map(|s| { - ListValue::new( - string_to_array_inner(s, sep) - .map(|x| Some(ScalarImpl::Utf8(x.into()))) - .collect_vec(), - ) - }) + Some(ListValue::new( + string_to_array_inner(s?, sep) + .map(|x| Some(ScalarImpl::Utf8(x.into()))) + .collect_vec(), + )) } -#[function("string_to_array(varchar, varchar, varchar) -> list")] +#[function("string_to_array(varchar, varchar, varchar) -> varchar[]")] pub fn string_to_array3( 
s: Option<&str>, sep: Option<&str>, null: Option<&str>, ) -> Option { - s.map(|s| { - null.map_or_else( - || string_to_array2(Some(s), sep).unwrap(), - |null| { - ListValue::new( - string_to_array_inner(s, sep) - .map(|x| { - if x == null { - None - } else { - Some(ScalarImpl::Utf8(x.into())) - } - }) - .collect_vec(), - ) - }, - ) - }) + let Some(null) = null else { + return string_to_array2(s, sep); + }; + Some(ListValue::new( + string_to_array_inner(s?, sep) + .map(|x| { + if x == null { + None + } else { + Some(ScalarImpl::Utf8(x.into())) + } + }) + .collect_vec(), + )) } diff --git a/src/expr/src/vector_op/substr.rs b/src/expr/impl/src/scalar/substr.rs similarity index 90% rename from src/expr/src/vector_op/substr.rs rename to src/expr/impl/src/scalar/substr.rs index c811d98fe71a4..dc2829c5d8a52 100644 --- a/src/expr/src/vector_op/substr.rs +++ b/src/expr/impl/src/scalar/substr.rs @@ -14,12 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::{function, ExprError, Result}; -use crate::{ExprError, Result}; - -#[function("substr(varchar, int32) -> varchar")] -pub fn substr_start(s: &str, start: i32, writer: &mut dyn Write) -> Result<()> { +#[function("substr(varchar, int4) -> varchar")] +pub fn substr_start(s: &str, start: i32, writer: &mut impl Write) -> Result<()> { let skip = start.saturating_sub(1).max(0) as usize; let substr = s.chars().skip(skip); @@ -30,8 +28,8 @@ pub fn substr_start(s: &str, start: i32, writer: &mut dyn Write) -> Result<()> { Ok(()) } -#[function("substr(varchar, int32, int32) -> varchar")] -pub fn substr_start_for(s: &str, start: i32, count: i32, writer: &mut dyn Write) -> Result<()> { +#[function("substr(varchar, int4, int4) -> varchar")] +pub fn substr_start_for(s: &str, start: i32, count: i32, writer: &mut impl Write) -> Result<()> { if count < 0 { return Err(ExprError::InvalidParam { name: "length", diff --git a/src/expr/src/vector_op/timestamptz.rs b/src/expr/impl/src/scalar/timestamptz.rs 
similarity index 95% rename from src/expr/src/vector_op/timestamptz.rs rename to src/expr/impl/src/scalar/timestamptz.rs index 716a521f742e4..a14eb0a36319b 100644 --- a/src/expr/src/vector_op/timestamptz.rs +++ b/src/expr/impl/src/scalar/timestamptz.rs @@ -15,11 +15,8 @@ use std::fmt::Write; use num_traits::CheckedNeg; -use risingwave_common::cast::str_to_timestamp; use risingwave_common::types::{CheckedAdd, Interval, IntoOrdered, Timestamp, Timestamptz, F64}; -use risingwave_expr_macro::function; - -use crate::{ExprError, Result}; +use risingwave_expr::{function, ExprError, Result}; /// Just a wrapper to reuse the `map_err` logic. #[inline(always)] @@ -30,7 +27,7 @@ pub fn time_zone_err(inner_err: String) -> ExprError { } } -#[function("to_timestamp(float64) -> timestamptz")] +#[function("to_timestamp(float8) -> timestamptz")] pub fn f64_sec_to_timestamptz(elem: F64) -> Result { // TODO(#4515): handle +/- infinity let micros = (elem.0 * 1e6) @@ -70,7 +67,7 @@ pub fn timestamp_at_time_zone(input: Timestamp, time_zone: &str) -> Result Result<()> { let time_zone = Timestamptz::lookup_time_zone(time_zone).map_err(time_zone_err)?; let instant_local = elem.to_datetime_in_zone(time_zone); @@ -89,7 +86,8 @@ pub fn timestamptz_to_string( pub fn str_to_timestamptz(elem: &str, time_zone: &str) -> Result { elem.parse().or_else(|_| { timestamp_at_time_zone( - str_to_timestamp(elem).map_err(|err| ExprError::Parse(err.into()))?, + elem.parse::() + .map_err(|err| ExprError::Parse(err.to_string().into()))?, time_zone, ) }) @@ -191,7 +189,6 @@ mod tests { use risingwave_common::util::iter_util::ZipEqFast; use super::*; - use crate::vector_op::cast::str_to_timestamp; #[test] fn test_time_zone_conversion() { @@ -221,7 +218,7 @@ mod tests { .skip(1) .zip_eq_fast(zones) .for_each(|(local, zone)| { - let local = str_to_timestamp(local).unwrap(); + let local = local.parse().unwrap(); let actual = timestamptz_at_time_zone(usecs, zone).unwrap(); assert_eq!(local, actual); @@ -240,9 +237,7 
@@ mod tests { ("2022-03-27 02:00:00", "europe/zurich"), ("2022-03-27 02:59:00", "europe/zurich"), ] { - let local = str_to_timestamp(local).unwrap(); - - let actual = timestamp_at_time_zone(local, zone); + let actual = timestamp_at_time_zone(local.parse().unwrap(), zone); assert!(actual.is_err()); } } @@ -262,7 +257,7 @@ mod tests { ]; for (instant, local, zone, preferred) in test_cases { let usecs = str_to_timestamptz(instant, "UTC").unwrap(); - let local = str_to_timestamp(local).unwrap(); + let local = local.parse().unwrap(); let actual = timestamptz_at_time_zone(usecs, zone).unwrap(); assert_eq!(local, actual); diff --git a/src/expr/impl/src/scalar/to_char.rs b/src/expr/impl/src/scalar/to_char.rs new file mode 100644 index 0000000000000..9d28d62eca7a6 --- /dev/null +++ b/src/expr/impl/src/scalar/to_char.rs @@ -0,0 +1,149 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::{Debug, Write}; +use std::sync::LazyLock; + +use aho_corasick::{AhoCorasick, AhoCorasickBuilder}; +use chrono::format::StrftimeItems; +use risingwave_common::types::{DataType, Timestamp, Timestamptz}; +use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::{build_function, function, ExprError, Result}; + +use super::timestamptz::time_zone_err; + +type Pattern<'a> = Vec>; + +self_cell::self_cell! 
{ + pub struct ChronoPattern { + owner: String, + #[covariant] + dependent: Pattern, + } +} + +impl Debug for ChronoPattern { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ChronoPattern") + .field("tmpl", self.borrow_owner()) + .finish() + } +} + +impl ChronoPattern { + /// Compile the pg pattern to chrono pattern. + // TODO: Chrono can not fully support the pg format, so consider using other implementations + // later. + pub fn compile(tmpl: &str) -> ChronoPattern { + // mapping from pg pattern to chrono pattern + // pg pattern: https://www.postgresql.org/docs/current/functions-formatting.html + // chrono pattern: https://docs.rs/chrono/latest/chrono/format/strftime/index.html + const PATTERNS: &[(&str, &str)] = &[ + ("HH24", "%H"), + ("hh24", "%H"), + ("HH12", "%I"), + ("hh12", "%I"), + ("HH", "%I"), + ("hh", "%I"), + ("AM", "%p"), + ("PM", "%p"), + ("am", "%P"), + ("pm", "%P"), + ("MI", "%M"), + ("mi", "%M"), + ("SS", "%S"), + ("ss", "%S"), + ("YYYY", "%Y"), + ("yyyy", "%Y"), + ("YY", "%y"), + ("yy", "%y"), + ("IYYY", "%G"), + ("iyyy", "%G"), + ("IY", "%g"), + ("iy", "%g"), + ("MM", "%m"), + ("mm", "%m"), + ("Month", "%B"), + ("Mon", "%b"), + ("DD", "%d"), + ("dd", "%d"), + ("US", "%6f"), + ("us", "%6f"), + ("MS", "%3f"), + ("ms", "%3f"), + ("TZH:TZM", "%:z"), + ("tzh:tzm", "%:z"), + ("TZHTZM", "%z"), + ("tzhtzm", "%z"), + ("TZH", "%#z"), + ("tzh", "%#z"), + ]; + // build an Aho-Corasick automaton for fast matching + static AC: LazyLock = LazyLock::new(|| { + AhoCorasickBuilder::new() + .ascii_case_insensitive(false) + .match_kind(aho_corasick::MatchKind::LeftmostLongest) + .build(PATTERNS.iter().map(|(k, _)| k)) + .expect("failed to build an Aho-Corasick automaton") + }); + + // replace all pg patterns with chrono patterns + let mut chrono_tmpl = String::new(); + AC.replace_all_with(tmpl, &mut chrono_tmpl, |mat, _, dst| { + dst.push_str(PATTERNS[mat.pattern()].1); + true + }); + tracing::debug!(tmpl, chrono_tmpl, 
"compile_pattern_to_chrono"); + ChronoPattern::new(chrono_tmpl, |tmpl| { + StrftimeItems::new(tmpl).collect::>() + }) + } +} + +#[function( + "to_char(timestamp, varchar) -> varchar", + prebuild = "ChronoPattern::compile($1)" +)] +fn timestamp_to_char(data: Timestamp, pattern: &ChronoPattern, writer: &mut impl Write) { + let format = data.0.format_with_items(pattern.borrow_dependent().iter()); + write!(writer, "{}", format).unwrap(); +} + +// Only to register this signature to function signature map. +#[build_function("to_char(timestamptz, varchar) -> varchar")] +fn timestamptz_to_char( + _return_type: DataType, + _children: Vec, +) -> Result { + Err(ExprError::UnsupportedFunction( + "to_char(timestamptz, varchar) should have been rewritten to include timezone".into(), + )) +} + +#[function( + "to_char(timestamptz, varchar, varchar) -> varchar", + prebuild = "ChronoPattern::compile($1)" +)] +fn timestamptz_to_char3( + data: Timestamptz, + zone: &str, + tmpl: &ChronoPattern, + writer: &mut impl Write, +) -> Result<()> { + let format = data + .to_datetime_in_zone(Timestamptz::lookup_time_zone(zone).map_err(time_zone_err)?) + .format_with_items(tmpl.borrow_dependent().iter()); + write!(writer, "{}", format).unwrap(); + Ok(()) +} diff --git a/src/expr/src/vector_op/to_timestamp.rs b/src/expr/impl/src/scalar/to_timestamp.rs similarity index 73% rename from src/expr/src/vector_op/to_timestamp.rs rename to src/expr/impl/src/scalar/to_timestamp.rs index 43c3c14d5fee3..bc93720373c74 100644 --- a/src/expr/src/vector_op/to_timestamp.rs +++ b/src/expr/impl/src/scalar/to_timestamp.rs @@ -13,12 +13,12 @@ // limitations under the License. 
use chrono::format::Parsed; -use risingwave_common::types::{Date, Timestamp, Timestamptz}; +use risingwave_common::types::{DataType, Date, Timestamp, Timestamptz}; +use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::{build_function, function, ExprError, Result}; -// use risingwave_expr_macro::function; use super::timestamptz::{timestamp_at_time_zone, timestamptz_at_time_zone}; -use super::to_char::{compile_pattern_to_chrono, ChronoPattern}; -use crate::Result; +use super::to_char::ChronoPattern; /// Parse the input string with the given chrono pattern. #[inline(always)] @@ -66,8 +66,12 @@ fn parse(s: &str, tmpl: &ChronoPattern) -> Result { Ok(parsed) } -#[inline(always)] -pub fn to_timestamp_const_tmpl_legacy(s: &str, tmpl: &ChronoPattern) -> Result { +#[function( + "to_timestamp1(varchar, varchar) -> timestamp", + prebuild = "ChronoPattern::compile($1)", + deprecated +)] +pub fn to_timestamp_legacy(s: &str, tmpl: &ChronoPattern) -> Result { let parsed = parse(s, tmpl)?; match parsed.offset { None => Ok(parsed.to_naive_datetime_with_offset(0)?.into()), @@ -77,12 +81,11 @@ pub fn to_timestamp_const_tmpl_legacy(s: &str, tmpl: &ChronoPattern) -> Result Result { +#[function( + "to_timestamp1(varchar, varchar, varchar) -> timestamptz", + prebuild = "ChronoPattern::compile($1)" +)] +pub fn to_timestamp(s: &str, timezone: &str, tmpl: &ChronoPattern) -> Result { let parsed = parse(s, tmpl)?; Ok(match parsed.offset { Some(_) => parsed.to_datetime()?.into(), @@ -91,8 +94,19 @@ pub fn to_timestamp_const_tmpl( }) } -#[inline(always)] -pub fn to_date_const_tmpl(s: &str, tmpl: &ChronoPattern) -> Result { +// Only to register this signature to function signature map. 
+#[build_function("to_timestamp1(varchar, varchar) -> timestamptz")] +fn build_dummy(_return_type: DataType, _children: Vec) -> Result { + Err(ExprError::UnsupportedFunction( + "to_timestamp should have been rewritten to include timezone".into(), + )) +} + +#[function( + "char_to_date(varchar, varchar) -> date", + prebuild = "ChronoPattern::compile($1)" +)] +pub fn to_date(s: &str, tmpl: &ChronoPattern) -> Result { let mut parsed = parse(s, tmpl)?; if let Some(year) = &mut parsed.year && *year < 0 { *year += 1; @@ -100,30 +114,12 @@ pub fn to_date_const_tmpl(s: &str, tmpl: &ChronoPattern) -> Result { Ok(parsed.to_naive_date()?.into()) } -// #[function("to_timestamp1(varchar, varchar) -> timestamp")] -pub fn to_timestamp_legacy(s: &str, tmpl: &str) -> Result { - let pattern = compile_pattern_to_chrono(tmpl); - to_timestamp_const_tmpl_legacy(s, &pattern) -} - -// #[function("to_timestamp1(varchar, varchar, varchar) -> timestamptz")] -pub fn to_timestamp(s: &str, tmpl: &str, timezone: &str) -> Result { - let pattern = compile_pattern_to_chrono(tmpl); - to_timestamp_const_tmpl(s, &pattern, timezone) -} - -// #[function("to_date(varchar, varchar) -> date")] -pub fn to_date(s: &str, tmpl: &str) -> Result { - let pattern = compile_pattern_to_chrono(tmpl); - to_date_const_tmpl(s, &pattern) -} - #[cfg(test)] mod tests { use super::*; - #[tokio::test] - async fn test_to_timestamp_legacy() { + #[test] + fn test_to_timestamp_legacy() { // This legacy expr can no longer be build by frontend, so we test its backward compatible // behavior in unit tests rather than e2e slt. 
for (input, format, expected) in [ @@ -138,7 +134,7 @@ mod tests { "2020-02-03 09:34:56", ), ] { - let actual = to_timestamp_legacy(input, format).unwrap(); + let actual = to_timestamp_legacy(input, &ChronoPattern::compile(format)).unwrap(); assert_eq!(actual.to_string(), expected); } } diff --git a/src/expr/src/vector_op/translate.rs b/src/expr/impl/src/scalar/translate.rs similarity index 97% rename from src/expr/src/vector_op/translate.rs rename to src/expr/impl/src/scalar/translate.rs index 8dcec75307946..56b9ba620ce1e 100644 --- a/src/expr/src/vector_op/translate.rs +++ b/src/expr/impl/src/scalar/translate.rs @@ -15,10 +15,10 @@ use std::collections::HashMap; use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("translate(varchar, varchar, varchar) -> varchar")] -pub fn translate(s: &str, match_str: &str, replace_str: &str, writer: &mut dyn Write) { +pub fn translate(s: &str, match_str: &str, replace_str: &str, writer: &mut impl Write) { let mut char_map = HashMap::new(); let mut match_chars = match_str.chars(); let mut replace_chars = replace_str.chars(); diff --git a/src/expr/src/vector_op/trigonometric.rs b/src/expr/impl/src/scalar/trigonometric.rs similarity index 94% rename from src/expr/src/vector_op/trigonometric.rs rename to src/expr/impl/src/scalar/trigonometric.rs index 671c56e0f1911..a5bc891a052da 100644 --- a/src/expr/src/vector_op/trigonometric.rs +++ b/src/expr/impl/src/scalar/trigonometric.rs @@ -13,65 +13,65 @@ // limitations under the License. 
use risingwave_common::types::F64; -use risingwave_expr_macro::function; +use risingwave_expr::function; -#[function("sin(float64) -> float64")] +#[function("sin(float8) -> float8")] pub fn sin_f64(input: F64) -> F64 { f64::sin(input.0).into() } -#[function("cos(float64) -> float64")] +#[function("cos(float8) -> float8")] pub fn cos_f64(input: F64) -> F64 { f64::cos(input.0).into() } -#[function("tan(float64) -> float64")] +#[function("tan(float8) -> float8")] pub fn tan_f64(input: F64) -> F64 { f64::tan(input.0).into() } -#[function("cot(float64) -> float64")] +#[function("cot(float8) -> float8")] pub fn cot_f64(input: F64) -> F64 { let res = 1.0 / f64::tan(input.0); res.into() } -#[function("asin(float64) -> float64")] +#[function("asin(float8) -> float8")] pub fn asin_f64(input: F64) -> F64 { f64::asin(input.0).into() } -#[function("acos(float64) -> float64")] +#[function("acos(float8) -> float8")] pub fn acos_f64(input: F64) -> F64 { f64::acos(input.0).into() } -#[function("atan(float64) -> float64")] +#[function("atan(float8) -> float8")] pub fn atan_f64(input: F64) -> F64 { f64::atan(input.0).into() } -#[function("atan2(float64, float64) -> float64")] +#[function("atan2(float8, float8) -> float8")] pub fn atan2_f64(input_x: F64, input_y: F64) -> F64 { input_x.0.atan2(input_y.0).into() } -#[function("sinh(float64) -> float64")] +#[function("sinh(float8) -> float8")] pub fn sinh_f64(input: F64) -> F64 { f64::sinh(input.0).into() } -#[function("cosh(float64) -> float64")] +#[function("cosh(float8) -> float8")] pub fn cosh_f64(input: F64) -> F64 { f64::cosh(input.0).into() } -#[function("tanh(float64) -> float64")] +#[function("tanh(float8) -> float8")] pub fn tanh_f64(input: F64) -> F64 { f64::tanh(input.0).into() } -#[function("coth(float64) -> float64")] +#[function("coth(float8) -> float8")] pub fn coth_f64(input: F64) -> F64 { if input.0 == 0.0 { return f64::NAN.into(); @@ -80,17 +80,17 @@ pub fn coth_f64(input: F64) -> F64 { (f64::cosh(input.0) / 
f64::sinh(input.0)).into() } -#[function("asinh(float64) -> float64")] +#[function("asinh(float8) -> float8")] pub fn asinh_f64(input: F64) -> F64 { f64::asinh(input.0).into() } -#[function("acosh(float64) -> float64")] +#[function("acosh(float8) -> float8")] pub fn acosh_f64(input: F64) -> F64 { f64::acosh(input.0).into() } -#[function("atanh(float64) -> float64")] +#[function("atanh(float8) -> float8")] pub fn atanh_f64(input: F64) -> F64 { f64::atanh(input.0).into() } @@ -135,7 +135,7 @@ fn cosd_q1(x: f64) -> f64 { } } -#[function("cosd(float64) -> float64")] +#[function("cosd(float8) -> float8")] pub fn cosd_f64(input: F64) -> F64 { // See PSQL implementation: https://github.com/postgres/postgres/blob/78ec02d612a9b69039ec2610740f738968fe144d/src/backend/utils/adt/float.c let arg1 = input.0; @@ -188,7 +188,7 @@ fn sind_q1(input: f64) -> f64 { } } -#[function("sind(float64) -> float64")] +#[function("sind(float8) -> float8")] pub fn sind_f64(input: F64) -> F64 { // PSQL implementation: https://github.com/postgres/postgres/blob/REL_15_2/src/backend/utils/adt/float.c#L2444 @@ -225,7 +225,7 @@ pub fn sind_f64(input: F64) -> F64 { } } -#[function("cotd(float64) -> float64")] +#[function("cotd(float8) -> float8")] pub fn cotd_f64(input: F64) -> F64 { // PSQL implementation: https://github.com/postgres/postgres/blob/78ec02d612a9b69039ec2610740f738968fe144d/src/backend/utils/adt/float.c#L2378 @@ -266,7 +266,7 @@ pub fn cotd_f64(input: F64) -> F64 { result.into() } -#[function("tand(float64) -> float64")] +#[function("tand(float8) -> float8")] pub fn tand_f64(input: F64) -> F64 { // PSQL implementation: https://github.com/postgres/postgres/blob/REL_15_2/src/backend/utils/adt/float.c // Returns NaN if input is NaN or infinite. Different from PSQL implementation. 
@@ -322,7 +322,7 @@ pub fn asind_q1(x: f64) -> f64 { 90.0 - (acos_x / ASIN_0_5) * 60.0 } -#[function("asind(float64) -> float64")] +#[function("asind(float8) -> float8")] pub fn asind_f64(input: F64) -> F64 { let arg1 = input.0; @@ -348,12 +348,12 @@ pub fn asind_f64(input: F64) -> F64 { result.into() } -#[function("degrees(float64) -> float64")] +#[function("degrees(float8) -> float8")] pub fn degrees_f64(input: F64) -> F64 { input.0.to_degrees().into() } -#[function("radians(float64) -> float64")] +#[function("radians(float8) -> float8")] pub fn radians_f64(input: F64) -> F64 { input.0.to_radians().into() } @@ -364,7 +364,7 @@ mod tests { use risingwave_common::types::{FloatExt, F64}; - use crate::vector_op::trigonometric::*; + use crate::scalar::trigonometric::*; fn precision() -> f64 { 1e-13 diff --git a/src/expr/src/vector_op/trim.rs b/src/expr/impl/src/scalar/trim.rs similarity index 94% rename from src/expr/src/vector_op/trim.rs rename to src/expr/impl/src/scalar/trim.rs index 754d90650f69c..8d3acb9324fc0 100644 --- a/src/expr/src/vector_op/trim.rs +++ b/src/expr/impl/src/scalar/trim.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("trim(varchar) -> varchar")] -pub fn trim(s: &str, writer: &mut dyn Write) { +pub fn trim(s: &str, writer: &mut impl Write) { writer.write_str(s.trim()).unwrap(); } @@ -25,7 +25,7 @@ pub fn trim(s: &str, writer: &mut dyn Write) { /// are actually different when the string is in right-to-left languages like Arabic or Hebrew. /// Since we would like to simplify the implementation, currently we omit this case. #[function("ltrim(varchar) -> varchar")] -pub fn ltrim(s: &str, writer: &mut dyn Write) { +pub fn ltrim(s: &str, writer: &mut impl Write) { writer.write_str(s.trim_start()).unwrap(); } @@ -33,12 +33,12 @@ pub fn ltrim(s: &str, writer: &mut dyn Write) { /// are actually different when the string is in right-to-left languages like Arabic or Hebrew. 
/// Since we would like to simplify the implementation, currently we omit this case. #[function("rtrim(varchar) -> varchar")] -pub fn rtrim(s: &str, writer: &mut dyn Write) { +pub fn rtrim(s: &str, writer: &mut impl Write) { writer.write_str(s.trim_end()).unwrap(); } #[function("trim(varchar, varchar) -> varchar")] -pub fn trim_characters(s: &str, characters: &str, writer: &mut dyn Write) { +pub fn trim_characters(s: &str, characters: &str, writer: &mut impl Write) { let pattern = |c| characters.chars().any(|ch| ch == c); // We remark that feeding a &str and a slice of chars into trim_left/right_matches // means different, one is matching with the entire string and the other one is matching @@ -47,13 +47,13 @@ pub fn trim_characters(s: &str, characters: &str, writer: &mut dyn Write) { } #[function("ltrim(varchar, varchar) -> varchar")] -pub fn ltrim_characters(s: &str, characters: &str, writer: &mut dyn Write) { +pub fn ltrim_characters(s: &str, characters: &str, writer: &mut impl Write) { let pattern = |c| characters.chars().any(|ch| ch == c); writer.write_str(s.trim_start_matches(pattern)).unwrap(); } #[function("rtrim(varchar, varchar) -> varchar")] -pub fn rtrim_characters(s: &str, characters: &str, writer: &mut dyn Write) { +pub fn rtrim_characters(s: &str, characters: &str, writer: &mut impl Write) { let pattern = |c| characters.chars().any(|ch| ch == c); writer.write_str(s.trim_end_matches(pattern)).unwrap(); } diff --git a/src/expr/src/vector_op/trim_array.rs b/src/expr/impl/src/scalar/trim_array.rs similarity index 94% rename from src/expr/src/vector_op/trim_array.rs rename to src/expr/impl/src/scalar/trim_array.rs index 0c75236a8eb0a..3a9bbed9c0562 100644 --- a/src/expr/src/vector_op/trim_array.rs +++ b/src/expr/impl/src/scalar/trim_array.rs @@ -14,10 +14,7 @@ use risingwave_common::array::{ListRef, ListValue}; use risingwave_common::types::ToOwnedDatum; -use risingwave_expr_macro::function; - -use crate::error::ExprError; -use crate::Result; +use 
risingwave_expr::{function, ExprError, Result}; /// Trims an array by removing the last n elements. If the array is multidimensional, only the first /// dimension is trimmed. @@ -73,7 +70,7 @@ use crate::Result; /// statement error /// select trim_array(array[1,2,3,4,5,null], true); /// ``` -#[function("trim_array(list, int32) -> list")] +#[function("trim_array(anyarray, int4) -> anyarray")] fn trim_array(array: ListRef<'_>, n: i32) -> Result { let values = array.iter(); let len_to_trim: usize = n.try_into().map_err(|_| ExprError::InvalidParam { diff --git a/src/expr/src/vector_op/tumble.rs b/src/expr/impl/src/scalar/tumble.rs similarity index 82% rename from src/expr/src/vector_op/tumble.rs rename to src/expr/impl/src/scalar/tumble.rs index 054053a5b1c32..507bbbd531ac4 100644 --- a/src/expr/src/vector_op/tumble.rs +++ b/src/expr/impl/src/scalar/tumble.rs @@ -14,15 +14,21 @@ use num_traits::Zero; use risingwave_common::types::{Date, Interval, Timestamp, Timestamptz}; -use risingwave_expr_macro::function; - -use crate::Result; +use risingwave_expr::{function, ExprError, Result}; #[inline(always)] -fn interval_to_micro_second(t: Interval) -> i64 { - t.months() as i64 * Interval::USECS_PER_MONTH - + t.days() as i64 * Interval::USECS_PER_DAY - + t.usecs() +fn interval_to_micro_second(t: Interval) -> Result { + let checked_interval_to_micro_second = || { + (t.months() as i64) + .checked_mul(Interval::USECS_PER_MONTH)? + .checked_add( + (t.days() as i64) + .checked_mul(Interval::USECS_PER_DAY)? 
+ .checked_add(t.usecs())?, + ) + }; + + checked_interval_to_micro_second().ok_or(ExprError::NumericOutOfRange) } #[function("tumble_start(date, interval) -> timestamp")] @@ -82,15 +88,23 @@ fn get_window_start_with_offset( window_size: Interval, offset: Interval, ) -> Result { - let window_size_micro_second = interval_to_micro_second(window_size); - let offset_micro_second = interval_to_micro_second(offset); + let window_size_micro_second = interval_to_micro_second(window_size)?; + let offset_micro_second = interval_to_micro_second(offset)?; // Inspired by https://issues.apache.org/jira/browse/FLINK-26334 - let remainder = (timestamp_micro_second - offset_micro_second) % window_size_micro_second; + let remainder = timestamp_micro_second + .checked_sub(offset_micro_second) + .ok_or(ExprError::NumericOutOfRange)? + .checked_rem(window_size_micro_second) + .ok_or(ExprError::DivisionByZero)?; if remainder < 0 { - Ok(timestamp_micro_second - (remainder + window_size_micro_second)) + timestamp_micro_second + .checked_sub(remainder + window_size_micro_second) + .ok_or(ExprError::NumericOutOfRange) } else { - Ok(timestamp_micro_second - remainder) + timestamp_micro_second + .checked_sub(remainder) + .ok_or(ExprError::NumericOutOfRange) } } @@ -111,7 +125,7 @@ mod tests { use risingwave_common::types::{Date, Interval}; use super::tumble_start_offset_date_time; - use crate::vector_op::tumble::{ + use crate::scalar::tumble::{ get_window_start, interval_to_micro_second, tumble_start_date_time, }; @@ -169,7 +183,7 @@ mod tests { let window_size = Interval::from_minutes(5); let window_start = get_window_start(timestamp_micro_second, window_size).unwrap(); - let window_size_micro_second = interval_to_micro_second(window_size); + let window_size_micro_second = interval_to_micro_second(window_size).unwrap(); let default_window_start = timestamp_micro_second - (timestamp_micro_second + window_size_micro_second) % window_size_micro_second; @@ -182,4 +196,10 @@ mod tests { } 
assert_ne!(wrong_cnt, 0); } + + #[test] + fn test_window_start_overflow() { + get_window_start(i64::MIN, Interval::from_millis(20)).unwrap_err(); + interval_to_micro_second(Interval::from_month_day_usec(1, 1, i64::MAX)).unwrap_err(); + } } diff --git a/src/expr/src/vector_op/upper.rs b/src/expr/impl/src/scalar/upper.rs similarity index 93% rename from src/expr/src/vector_op/upper.rs rename to src/expr/impl/src/scalar/upper.rs index 45cf51ce9e327..97e0e696b509e 100644 --- a/src/expr/src/vector_op/upper.rs +++ b/src/expr/impl/src/scalar/upper.rs @@ -14,10 +14,10 @@ use std::fmt::Write; -use risingwave_expr_macro::function; +use risingwave_expr::function; #[function("upper(varchar) -> varchar")] -pub fn upper(s: &str, writer: &mut dyn Write) { +pub fn upper(s: &str, writer: &mut impl Write) { for c in s.chars() { writer.write_char(c.to_ascii_uppercase()).unwrap(); } diff --git a/src/expr/src/table_function/generate_series.rs b/src/expr/impl/src/table_function/generate_series.rs similarity index 61% rename from src/expr/src/table_function/generate_series.rs rename to src/expr/impl/src/table_function/generate_series.rs index 354349e1b03f1..dfa09b0e215b8 100644 --- a/src/expr/src/table_function/generate_series.rs +++ b/src/expr/impl/src/table_function/generate_series.rs @@ -13,14 +13,11 @@ // limitations under the License. 
use num_traits::One; -use risingwave_common::types::{CheckedAdd, IsNegative}; -use risingwave_expr_macro::function; +use risingwave_common::types::{CheckedAdd, Decimal, IsNegative}; +use risingwave_expr::{function, ExprError, Result}; -use super::*; - -#[function("generate_series(int32, int32) -> setof int32")] -#[function("generate_series(int64, int64) -> setof int64")] -#[function("generate_series(decimal, decimal) -> setof decimal")] +#[function("generate_series(int4, int4) -> setof int4")] +#[function("generate_series(int8, int8) -> setof int8")] fn generate_series(start: T, stop: T) -> Result>> where T: CheckedAdd + PartialOrd + Copy + One + IsNegative, @@ -28,9 +25,19 @@ where range_generic::<_, _, true>(start, stop, T::one()) } -#[function("generate_series(int32, int32, int32) -> setof int32")] -#[function("generate_series(int64, int64, int64) -> setof int64")] -#[function("generate_series(decimal, decimal, decimal) -> setof decimal")] +#[function("generate_series(decimal, decimal) -> setof decimal")] +fn generate_series_decimal( + start: Decimal, + stop: Decimal, +) -> Result>> +where +{ + validate_range_parameters(start, stop, Decimal::one())?; + range_generic::(start, stop, Decimal::one()) +} + +#[function("generate_series(int4, int4, int4) -> setof int4")] +#[function("generate_series(int8, int8, int8) -> setof int8")] #[function("generate_series(timestamp, timestamp, interval) -> setof timestamp")] fn generate_series_step(start: T, stop: T, step: S) -> Result>> where @@ -40,9 +47,18 @@ where range_generic::<_, _, true>(start, stop, step) } -#[function("range(int32, int32) -> setof int32")] -#[function("range(int64, int64) -> setof int64")] -#[function("range(decimal, decimal) -> setof decimal")] +#[function("generate_series(decimal, decimal, decimal) -> setof decimal")] +fn generate_series_step_decimal( + start: Decimal, + stop: Decimal, + step: Decimal, +) -> Result>> { + validate_range_parameters(start, stop, step)?; + range_generic::<_, _, 
true>(start, stop, step) +} + +#[function("range(int4, int4) -> setof int4")] +#[function("range(int8, int8) -> setof int8")] fn range(start: T, stop: T) -> Result>> where T: CheckedAdd + PartialOrd + Copy + One + IsNegative, @@ -50,9 +66,16 @@ where range_generic::<_, _, false>(start, stop, T::one()) } -#[function("range(int32, int32, int32) -> setof int32")] -#[function("range(int64, int64, int64) -> setof int64")] -#[function("range(decimal, decimal, decimal) -> setof decimal")] +#[function("range(decimal, decimal) -> setof decimal")] +fn range_decimal(start: Decimal, stop: Decimal) -> Result>> +where +{ + validate_range_parameters(start, stop, Decimal::one())?; + range_generic::(start, stop, Decimal::one()) +} + +#[function("range(int4, int4, int4) -> setof int4")] +#[function("range(int8, int8, int8) -> setof int8")] #[function("range(timestamp, timestamp, interval) -> setof timestamp")] fn range_step(start: T, stop: T, step: S) -> Result>> where @@ -62,6 +85,16 @@ where range_generic::<_, _, false>(start, stop, step) } +#[function("range(decimal, decimal, decimal) -> setof decimal")] +fn range_step_decimal( + start: Decimal, + stop: Decimal, + step: Decimal, +) -> Result>> { + validate_range_parameters(start, stop, step)?; + range_generic::<_, _, false>(start, stop, step) +} + #[inline] fn range_generic( start: T, @@ -95,14 +128,41 @@ where Ok(std::iter::from_fn(move || next().transpose())) } +#[inline] +fn validate_range_parameters(start: Decimal, stop: Decimal, step: Decimal) -> Result<()> { + validate_decimal(start, "start")?; + validate_decimal(stop, "stop")?; + validate_decimal(step, "step")?; + Ok(()) +} + +#[inline] +fn validate_decimal(decimal: Decimal, name: &'static str) -> Result<()> { + match decimal { + Decimal::Normalized(_) => Ok(()), + Decimal::PositiveInf | Decimal::NegativeInf => Err(ExprError::InvalidParam { + name, + reason: format!("{} value cannot be infinity", name).into(), + }), + Decimal::NaN => Err(ExprError::InvalidParam { + name, + 
reason: format!("{} value cannot be NaN", name).into(), + }), + } +} + #[cfg(test)] mod tests { - use risingwave_common::types::test_utils::IntervalTestExt; - use risingwave_common::types::{DataType, Interval, ScalarImpl, Timestamp}; + use std::str::FromStr; - use super::*; - use crate::expr::{Expression, LiteralExpression}; - use crate::vector_op::cast::str_to_timestamp; + use futures_util::StreamExt; + use risingwave_common::array::DataChunk; + use risingwave_common::types::test_utils::IntervalTestExt; + use risingwave_common::types::{DataType, Decimal, Interval, ScalarImpl, Timestamp}; + use risingwave_expr::expr::{BoxedExpression, ExpressionBoxExt, LiteralExpression}; + use risingwave_expr::table_function::build; + use risingwave_expr::ExprError; + use risingwave_pb::expr::table_function::PbType; const CHUNK_SIZE: usize = 1024; @@ -138,8 +198,8 @@ mod tests { #[tokio::test] async fn test_generate_series_timestamp() { - let start_time = str_to_timestamp("2008-03-01 00:00:00").unwrap(); - let stop_time = str_to_timestamp("2008-03-09 00:00:00").unwrap(); + let start_time = Timestamp::from_str("2008-03-01 00:00:00").unwrap(); + let stop_time = Timestamp::from_str("2008-03-09 00:00:00").unwrap(); let one_minute_step = Interval::from_minutes(1); let one_hour_step = Interval::from_minutes(60); let one_day_step = Interval::from_days(1); @@ -211,8 +271,8 @@ mod tests { #[tokio::test] async fn test_range_timestamp() { - let start_time = str_to_timestamp("2008-03-01 00:00:00").unwrap(); - let stop_time = str_to_timestamp("2008-03-09 00:00:00").unwrap(); + let start_time = Timestamp::from_str("2008-03-01 00:00:00").unwrap(); + let stop_time = Timestamp::from_str("2008-03-09 00:00:00").unwrap(); let one_minute_step = Interval::from_minutes(1); let one_hour_step = Interval::from_minutes(60); let one_day_step = Interval::from_days(1); @@ -246,4 +306,61 @@ mod tests { } assert_eq!(actual_cnt, expect_cnt); } + + #[tokio::test] + async fn test_generate_series_decimal() { + let 
start = Decimal::from_str("1").unwrap(); + let start_inf = Decimal::from_str("infinity").unwrap(); + let stop = Decimal::from_str("5").unwrap(); + let stop_inf = Decimal::from_str("-infinity").unwrap(); + + let step = Decimal::from_str("1").unwrap(); + let step_nan = Decimal::from_str("nan").unwrap(); + let step_inf = Decimal::from_str("infinity").unwrap(); + generate_series_decimal(start, stop, step, true).await; + generate_series_decimal(start_inf, stop, step, false).await; + generate_series_decimal(start_inf, stop_inf, step, false).await; + generate_series_decimal(start, stop_inf, step, false).await; + generate_series_decimal(start, stop, step_nan, false).await; + generate_series_decimal(start, stop, step_inf, false).await; + generate_series_decimal(start, stop_inf, step_nan, false).await; + } + + async fn generate_series_decimal( + start: Decimal, + stop: Decimal, + step: Decimal, + expect_ok: bool, + ) { + fn literal(ty: DataType, v: ScalarImpl) -> BoxedExpression { + LiteralExpression::new(ty, Some(v)).boxed() + } + let function = build( + PbType::GenerateSeries, + DataType::Decimal, + CHUNK_SIZE, + vec![ + literal(DataType::Decimal, start.into()), + literal(DataType::Decimal, stop.into()), + literal(DataType::Decimal, step.into()), + ], + ) + .unwrap(); + + let dummy_chunk = DataChunk::new_dummy(1); + let mut output = function.eval(&dummy_chunk).await; + while let Some(res) = output.next().await { + match res { + Ok(_) => { + assert!(expect_ok); + } + Err(ExprError::InvalidParam { .. 
}) => { + assert!(!expect_ok); + } + Err(_) => { + unreachable!(); + } + } + } + } } diff --git a/src/expr/src/table_function/generate_subscripts.rs b/src/expr/impl/src/table_function/generate_subscripts.rs similarity index 86% rename from src/expr/src/table_function/generate_subscripts.rs rename to src/expr/impl/src/table_function/generate_subscripts.rs index 2f0067d2453bc..c3ecd6afabf5b 100644 --- a/src/expr/src/table_function/generate_subscripts.rs +++ b/src/expr/impl/src/table_function/generate_subscripts.rs @@ -15,9 +15,7 @@ use auto_enums::auto_enum; use risingwave_common::array::ListRef; use risingwave_common::types::ScalarRefImpl; -use risingwave_expr_macro::function; - -use super::*; +use risingwave_expr::function; /// ```slt /// query I @@ -58,13 +56,13 @@ use super::*; /// ---- /// 1 /// ``` -#[function("generate_subscripts(list, int32, boolean) -> setof int32")] +#[function("generate_subscripts(anyarray, int4, boolean) -> setof int4")] fn generate_subscripts_reverse( array: ListRef<'_>, dim: i32, reverse: bool, -) -> Result> { - Ok(generate_subscripts_iterator(array, dim, reverse)) +) -> impl Iterator { + generate_subscripts_iterator(array, dim, reverse) } /// ```slt @@ -106,9 +104,9 @@ fn generate_subscripts_reverse( /// ---- /// 1 /// ``` -#[function("generate_subscripts(list, int32) -> setof int32")] -fn generate_subscripts(array: ListRef<'_>, dim: i32) -> Result> { - Ok(generate_subscripts_iterator(array, dim, false)) +#[function("generate_subscripts(anyarray, int4) -> setof int4")] +fn generate_subscripts(array: ListRef<'_>, dim: i32) -> impl Iterator { + generate_subscripts_iterator(array, dim, false) } #[auto_enum(Iterator)] @@ -132,7 +130,7 @@ fn generate_subscripts_inner(array: ListRef<'_>, dim: i32) -> (i32, i32) { ..=0 => nothing, 1 => (1, array.len() as i32 + 1), // Although RW's array can be zig-zag, we just look at the first element. - 2.. => match array.elem_at(0) { + 2.. 
=> match array.get(0) { Some(Some(ScalarRefImpl::List(list))) => generate_subscripts_inner(list, dim - 1), _ => nothing, }, diff --git a/src/expr/src/table_function/jsonb.rs b/src/expr/impl/src/table_function/jsonb.rs similarity index 97% rename from src/expr/src/table_function/jsonb.rs rename to src/expr/impl/src/table_function/jsonb.rs index 4d70ab0d9b545..6abd195a61f0d 100644 --- a/src/expr/src/table_function/jsonb.rs +++ b/src/expr/impl/src/table_function/jsonb.rs @@ -16,9 +16,7 @@ use anyhow::anyhow; use risingwave_common::types::JsonbRef; -use risingwave_expr_macro::function; - -use super::*; +use risingwave_expr::{function, Result}; /// Expands the top-level JSON array into a set of JSON values. #[function("jsonb_array_elements(jsonb) -> setof jsonb")] diff --git a/src/expr/impl/src/table_function/mod.rs b/src/expr/impl/src/table_function/mod.rs new file mode 100644 index 0000000000000..ea46bebb5471e --- /dev/null +++ b/src/expr/impl/src/table_function/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod generate_series; +mod generate_subscripts; +mod jsonb; +mod pg_expandarray; +mod regexp_matches; +mod unnest; diff --git a/src/expr/impl/src/table_function/pg_expandarray.rs b/src/expr/impl/src/table_function/pg_expandarray.rs new file mode 100644 index 0000000000000..bf0107b703647 --- /dev/null +++ b/src/expr/impl/src/table_function/pg_expandarray.rs @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_common::types::{DataType, ListRef, ScalarRefImpl, StructType}; +use risingwave_expr::{function, Result}; + +/// Returns the input array as a set of rows with an index. +/// +/// ```slt +/// query II +/// select * from _pg_expandarray(array[1,2,null]); +/// ---- +/// 1 1 +/// 2 2 +/// NULL 3 +/// +/// query TI +/// select * from _pg_expandarray(array['one', null, 'three']); +/// ---- +/// one 1 +/// NULL 2 +/// three 3 +/// ``` +#[function( + "_pg_expandarray(anyarray) -> setof struct", + type_infer = "infer_type" +)] +fn _pg_expandarray(array: ListRef<'_>) -> impl Iterator>, i32)> { + #[allow(clippy::disallowed_methods)] + array.iter().zip(1..) 
+} + +fn infer_type(args: &[DataType]) -> Result { + Ok(DataType::Struct(StructType::new(vec![ + ("x", args[0].as_list().clone()), + ("n", DataType::Int32), + ]))) +} diff --git a/src/expr/src/table_function/regexp_matches.rs b/src/expr/impl/src/table_function/regexp_matches.rs similarity index 90% rename from src/expr/src/table_function/regexp_matches.rs rename to src/expr/impl/src/table_function/regexp_matches.rs index 961bbf2adb6e7..a5c3b0f3d4812 100644 --- a/src/expr/src/table_function/regexp_matches.rs +++ b/src/expr/impl/src/table_function/regexp_matches.rs @@ -13,11 +13,9 @@ // limitations under the License. use risingwave_common::array::ListValue; -use risingwave_expr_macro::function; +use risingwave_expr::function; -use super::*; -use crate::expr::expr_regexp::RegexpContext; -use crate::ExprError; +use crate::scalar::regexp::RegexpContext; #[function( "regexp_matches(varchar, varchar) -> setof varchar[]", @@ -36,10 +34,11 @@ fn regexp_matches<'a>( // ignored in PostgreSQL's behavior. 
let skip_flag = regex.regex.captures_len() > 1; let list = capture + .unwrap() .iter() .skip(if skip_flag { 1 } else { 0 }) .map(|mat| mat.map(|m| m.as_str().into())) - .collect_vec(); + .collect(); ListValue::new(list) }) } diff --git a/src/expr/src/table_function/unnest.rs b/src/expr/impl/src/table_function/unnest.rs similarity index 85% rename from src/expr/src/table_function/unnest.rs rename to src/expr/impl/src/table_function/unnest.rs index 40cc1719a207c..7534b903565dd 100644 --- a/src/expr/src/table_function/unnest.rs +++ b/src/expr/impl/src/table_function/unnest.rs @@ -14,13 +14,11 @@ use risingwave_common::array::ListRef; use risingwave_common::types::ScalarRefImpl; -use risingwave_expr_macro::function; - -use super::*; +use risingwave_expr::function; #[function( - "unnest(list) -> setof any", - type_infer = "|args| Ok(args[0].unnest_list())" + "unnest(anyarray) -> setof any", + type_infer = "|args| Ok(args[0].unnest_list().clone())" )] fn unnest(list: ListRef<'_>) -> impl Iterator>> { list.flatten().into_iter() diff --git a/src/expr/impl/tests/sig.rs b/src/expr/impl/tests/sig.rs new file mode 100644 index 0000000000000..95798d8284929 --- /dev/null +++ b/src/expr/impl/tests/sig.rs @@ -0,0 +1,82 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashMap; + +risingwave_expr_impl::enable!(); + +use itertools::Itertools; +use risingwave_expr::sig::{FuncName, FuncSign, SigDataType, FUNCTION_REGISTRY}; +#[test] +fn test_func_sig_map() { + // convert FUNC_SIG_MAP to a more convenient map for testing + let mut new_map: HashMap, Vec>> = HashMap::new(); + for sig in FUNCTION_REGISTRY.iter_scalars() { + // exclude deprecated functions + if sig.deprecated { + continue; + } + + new_map + .entry(sig.name) + .or_default() + .entry(sig.inputs_type.to_vec()) + .or_default() + .push(sig.clone()); + } + + let mut duplicated: Vec<_> = new_map + .into_values() + .flat_map(|funcs_with_same_name| { + funcs_with_same_name.into_values().filter_map(|v| { + if v.len() > 1 { + Some(format!( + "{}({}) -> {}", + v[0].name.as_str_name().to_ascii_lowercase(), + v[0].inputs_type.iter().format(", "), + v.iter().map(|sig| &sig.ret_type).format("/") + )) + } else { + None + } + }) + }) + .collect(); + duplicated.sort(); + + // This snapshot shows the function signatures without a unique match. Frontend has to + // handle them specially without relying on FuncSigMap. 
+ let expected = expect_test::expect![[r#" + [ + "cast(anyarray) -> character varying/anyarray", + "cast(bigint) -> rw_int256/integer/smallint/numeric/double precision/real/character varying", + "cast(boolean) -> integer/character varying", + "cast(character varying) -> jsonb/interval/timestamp without time zone/time without time zone/date/rw_int256/real/double precision/numeric/smallint/integer/bigint/character varying/boolean/bytea/anyarray", + "cast(date) -> timestamp without time zone/character varying", + "cast(double precision) -> numeric/real/bigint/integer/smallint/character varying", + "cast(integer) -> rw_int256/smallint/numeric/double precision/real/bigint/boolean/character varying", + "cast(interval) -> time without time zone/character varying", + "cast(jsonb) -> boolean/double precision/real/numeric/bigint/integer/smallint/character varying", + "cast(numeric) -> double precision/real/bigint/integer/smallint/character varying", + "cast(real) -> numeric/bigint/integer/smallint/double precision/character varying", + "cast(rw_int256) -> double precision/character varying", + "cast(smallint) -> rw_int256/numeric/double precision/real/bigint/integer/character varying", + "cast(time without time zone) -> interval/character varying", + "cast(timestamp without time zone) -> time without time zone/date/character varying", + "greatest() -> bytea/character varying/timestamp with time zone/timestamp without time zone/interval/time without time zone/date/rw_int256/serial/real/double precision/numeric/smallint/integer/bigint/boolean", + "least() -> bytea/character varying/timestamp with time zone/timestamp without time zone/interval/time without time zone/date/rw_int256/serial/real/double precision/numeric/smallint/integer/bigint/boolean", + ] + "#]]; + expected.assert_debug_eq(&duplicated); +} diff --git a/src/expr/macro/Cargo.toml b/src/expr/macro/Cargo.toml index c73d9c723dd69..bf761b142061f 100644 --- a/src/expr/macro/Cargo.toml +++ b/src/expr/macro/Cargo.toml @@ 
-11,7 +11,7 @@ proc-macro = true itertools = "0.11" proc-macro2 = "1" quote = "1" -syn = "2" +syn = { version = "2", features = ["full", "extra-traits"] } [lints] workspace = true diff --git a/src/expr/macro/src/context.rs b/src/expr/macro/src/context.rs new file mode 100644 index 0000000000000..e55c5adee6de2 --- /dev/null +++ b/src/expr/macro/src/context.rs @@ -0,0 +1,211 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use itertools::Itertools; +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned, ToTokens}; +use syn::parse::{Parse, ParseStream}; +use syn::{Error, FnArg, Ident, ItemFn, Result, Token, Type, Visibility}; + +use crate::utils::extend_vis_with_super; + +/// See [`super::define_context!`]. +#[derive(Debug, Clone)] +pub(super) struct DefineContextField { + vis: Visibility, + name: Ident, + ty: Type, +} + +/// See [`super::define_context!`]. 
+#[derive(Debug, Clone)] +pub(super) struct DefineContextAttr { + fields: Vec, +} + +impl Parse for DefineContextField { + fn parse(input: ParseStream<'_>) -> Result { + let vis: Visibility = input.parse()?; + let name: Ident = input.parse()?; + input.parse::()?; + let ty: Type = input.parse()?; + + Ok(Self { vis, name, ty }) + } +} + +impl Parse for DefineContextAttr { + fn parse(input: ParseStream<'_>) -> Result { + let fields = input.parse_terminated(DefineContextField::parse, Token![,])?; + Ok(Self { + fields: fields.into_iter().collect(), + }) + } +} + +impl DefineContextField { + pub(super) fn gen(self) -> Result { + let Self { vis, name, ty } = self; + + // We create a sub mod, so we need to extend the vis of getter. + let vis: Visibility = extend_vis_with_super(vis); + + { + let name_s = name.to_string(); + if name_s.to_uppercase() != name_s { + return Err(Error::new_spanned( + name, + "the name of context variable should be uppercase", + )); + } + } + + Ok(quote! { + #[allow(non_snake_case)] + pub mod #name { + use super::*; + pub type Type = #ty; + + tokio::task_local! { + static LOCAL_KEY: #ty; + } + + #vis fn try_with(f: F) -> Result + where + F: FnOnce(&#ty) -> R + { + LOCAL_KEY.try_with(f).map_err(|_| risingwave_expr::ContextUnavailable::new(stringify!(#name))).map_err(Into::into) + } + + pub fn scope(value: #ty, f: F) -> tokio::task::futures::TaskLocalFuture<#ty, F> + where + F: std::future::Future + { + LOCAL_KEY.scope(value, f) + } + + pub fn sync_scope(value: #ty, f: F) -> R + where + F: FnOnce() -> R + { + LOCAL_KEY.sync_scope(value, f) + } + } + }) + } +} + +impl DefineContextAttr { + pub(super) fn gen(self) -> Result { + let generated_fields: Vec = self + .fields + .into_iter() + .map(DefineContextField::gen) + .try_collect()?; + Ok(quote! { + #(#generated_fields)* + }) + } +} + +pub struct CaptureContextAttr { + /// The context variables which are captured. 
+ captures: Vec, +} + +impl Parse for CaptureContextAttr { + fn parse(input: ParseStream<'_>) -> Result { + let captures = input.parse_terminated(Ident::parse, Token![,])?; + Ok(Self { + captures: captures.into_iter().collect(), + }) + } +} + +pub(super) fn generate_captured_function( + attr: CaptureContextAttr, + mut user_fn: ItemFn, +) -> Result { + let CaptureContextAttr { captures } = attr; + let orig_user_fn = user_fn.clone(); + + let sig = &mut user_fn.sig; + + // Modify the name. + { + let new_name = format!("{}_captured", sig.ident); + let new_name = Ident::new(&new_name, sig.ident.span()); + sig.ident = new_name; + } + + // Modify the inputs of sig. + let inputs = &mut sig.inputs; + if inputs.len() < captures.len() { + return Err(syn::Error::new_spanned( + inputs, + format!("expected at least {} inputs", captures.len()), + )); + } + + let (captured_inputs, remained_inputs) = { + let mut inputs = inputs.iter().cloned(); + let inputs = inputs.by_ref(); + let captured_inputs = inputs.take(captures.len()).collect_vec(); + let remained_inputs = inputs.collect_vec(); + (captured_inputs, remained_inputs) + }; + *inputs = remained_inputs.into_iter().collect(); + + // Modify the body + let body = &mut user_fn.block; + let new_body = { + let mut scoped = quote! { + // TODO: We can call the old function directly here. + #body + }; + + #[allow(clippy::disallowed_methods)] + for (context, arg) in captures.into_iter().zip(captured_inputs.into_iter()) { + let FnArg::Typed(arg) = arg else { + return Err(syn::Error::new_spanned( + arg, + "receiver is not allowed in captured function", + )); + }; + let name = arg.pat.into_token_stream(); + scoped = quote_spanned! { context.span()=> + // TODO: Can we add an assertion here that `&<<#context::Type> as Deref>::Target` is same as `#arg.ty`? + #context::try_with(|#name| { + #scoped + }).flatten() + } + } + scoped + }; + let new_user_fn = { + let vis = user_fn.vis; + let sig = user_fn.sig; + quote! 
{ + #vis #sig { + {#new_body}.map_err(Into::into) + } + } + }; + + Ok(quote! { + #[allow(dead_code)] + #orig_user_fn + #new_user_fn + }) +} diff --git a/src/expr/macro/src/gen.rs b/src/expr/macro/src/gen.rs index 4106f0f9cda4d..9155853df5b7b 100644 --- a/src/expr/macro/src/gen.rs +++ b/src/expr/macro/src/gen.rs @@ -15,7 +15,7 @@ //! Generate code for the functions. use itertools::Itertools; -use proc_macro2::Span; +use proc_macro2::{Ident, Span}; use quote::{format_ident, quote}; use super::*; @@ -47,10 +47,53 @@ impl FunctionAttr { attrs } - /// Generate a descriptor of the function. + /// Generate the type infer function. + fn generate_type_infer_fn(&self) -> Result { + if let Some(func) = &self.type_infer { + if func == "panic" { + return Ok(quote! { |_| panic!("type inference function is not implemented") }); + } + // use the user defined type inference function + return Ok(func.parse().unwrap()); + } else if self.ret == "any" { + // TODO: if there are multiple "any", they should be the same type + if let Some(i) = self.args.iter().position(|t| t == "any") { + // infer as the type of "any" argument + return Ok(quote! { |args| Ok(args[#i].clone()) }); + } + if let Some(i) = self.args.iter().position(|t| t == "anyarray") { + // infer as the element type of "anyarray" argument + return Ok(quote! { |args| Ok(args[#i].as_list().clone()) }); + } + } else if self.ret == "anyarray" { + if let Some(i) = self.args.iter().position(|t| t == "anyarray") { + // infer as the type of "anyarray" argument + return Ok(quote! { |args| Ok(args[#i].clone()) }); + } + if let Some(i) = self.args.iter().position(|t| t == "any") { + // infer as the array type of "any" argument + return Ok(quote! { |args| Ok(DataType::List(Box::new(args[#i].clone()))) }); + } + } else if self.ret == "struct" { + if let Some(i) = self.args.iter().position(|t| t == "struct") { + // infer as the type of "struct" argument + return Ok(quote! 
{ |args| Ok(args[#i].clone()) }); + } + } else { + // the return type is fixed + let ty = data_type(&self.ret); + return Ok(quote! { |_| Ok(#ty) }); + } + Err(Error::new( + Span::call_site(), + "type inference function is required", + )) + } + + /// Generate a descriptor of the scalar or table function. /// /// The types of arguments and return value should not contain wildcard. - pub fn generate_descriptor( + pub fn generate_function_descriptor( &self, user_fn: &UserFunctionAttr, build_fn: bool, @@ -59,222 +102,413 @@ impl FunctionAttr { return self.generate_table_function_descriptor(user_fn, build_fn); } let name = self.name.clone(); - let mut args = Vec::with_capacity(self.args.len()); - for ty in &self.args { - args.push(data_type_name(ty)); + let variadic = matches!(self.args.last(), Some(t) if t == "..."); + let args = match variadic { + true => &self.args[..self.args.len() - 1], + false => &self.args[..], } - let ret = data_type_name(&self.ret); + .iter() + .map(|ty| sig_data_type(ty)) + .collect_vec(); + let ret = sig_data_type(&self.ret); let pb_type = format_ident!("{}", utils::to_camel_case(&name)); let ctor_name = format_ident!("{}", self.ident_name()); - let descriptor_type = quote! { crate::sig::func::FuncSign }; let build_fn = if build_fn { let name = format_ident!("{}", user_fn.name); quote! { #name } } else { - self.generate_build_fn(user_fn)? + self.generate_build_scalar_function(user_fn, true)? }; + let type_infer_fn = self.generate_type_infer_fn()?; let deprecated = self.deprecated; + Ok(quote! 
{ - #[ctor::ctor] + #[risingwave_expr::codegen::ctor] fn #ctor_name() { use risingwave_common::types::{DataType, DataTypeName}; - unsafe { crate::sig::func::_register(#descriptor_type { - func: risingwave_pb::expr::expr_node::Type::#pb_type, - inputs_type: &[#(#args),*], + use risingwave_expr::sig::{_register, FuncSign, SigDataType, FuncBuilder}; + + unsafe { _register(FuncSign { + name: risingwave_pb::expr::expr_node::Type::#pb_type.into(), + inputs_type: vec![#(#args),*], + variadic: #variadic, ret_type: #ret, - build: #build_fn, + build: FuncBuilder::Scalar(#build_fn), + type_infer: #type_infer_fn, deprecated: #deprecated, + state_type: None, + append_only: false, }) }; } }) } - fn generate_build_fn(&self, user_fn: &UserFunctionAttr) -> Result { - let num_args = self.args.len(); + /// Generate a build function for the scalar function. + /// + /// If `optimize_const` is true, the function will be optimized for constant arguments, + /// and fallback to the general version if any argument is not constant. + fn generate_build_scalar_function( + &self, + user_fn: &UserFunctionAttr, + optimize_const: bool, + ) -> Result { + let variadic = matches!(self.args.last(), Some(t) if t == "..."); + let num_args = self.args.len() - if variadic { 1 } else { 0 }; let fn_name = format_ident!("{}", user_fn.name); - let arg_arrays = self - .args - .iter() - .map(|t| format_ident!("{}", types::array_type(t))); - let ret_array = format_ident!("{}", types::array_type(&self.ret)); - let arg_types = self - .args + let struct_name = match optimize_const { + true => format_ident!("{}OptimizeConst", utils::to_camel_case(&self.ident_name())), + false => format_ident!("{}", utils::to_camel_case(&self.ident_name())), + }; + + // we divide all arguments into two groups: prebuilt and non-prebuilt. + // prebuilt arguments are collected from the "prebuild" field. + // let's say we have a function with 3 arguments: [0, 1, 2] + // and the prebuild field contains "$1". 
+ // then we have: + // prebuilt_indices = [1] + // non_prebuilt_indices = [0, 2] + // + // if the const argument optimization is enabled, prebuilt arguments are + // evaluated at build time, thus the children only contain non-prebuilt arguments: + // children_indices = [0, 2] + // otherwise, the children contain all arguments: + // children_indices = [0, 1, 2] + + let prebuilt_indices = match &self.prebuild { + Some(s) => (0..num_args) + .filter(|i| s.contains(&format!("${i}"))) + .collect_vec(), + None => vec![], + }; + let non_prebuilt_indices = match &self.prebuild { + Some(s) => (0..num_args) + .filter(|i| !s.contains(&format!("${i}"))) + .collect_vec(), + _ => (0..num_args).collect_vec(), + }; + let children_indices = match optimize_const { + #[allow(clippy::redundant_clone)] // false-positive + true => non_prebuilt_indices.clone(), + false => (0..num_args).collect_vec(), + }; + + /// Return a list of identifiers with the given prefix and indices. + fn idents(prefix: &str, indices: &[usize]) -> Vec { + indices + .iter() + .map(|i| format_ident!("{prefix}{i}")) + .collect() + } + let inputs = idents("i", &children_indices); + let prebuilt_inputs = idents("i", &prebuilt_indices); + let non_prebuilt_inputs = idents("i", &non_prebuilt_indices); + let array_refs = idents("array", &children_indices); + let arrays = idents("a", &children_indices); + let datums = idents("v", &children_indices); + let arg_arrays = children_indices .iter() - .map(|t| types::ref_type(t).parse::().unwrap()); - let ret_type = types::ref_type(&self.ret).parse::().unwrap(); - let exprs = (0..num_args) - .map(|i| format_ident!("e{i}")) - .collect::>(); - #[expect( - clippy::redundant_clone, - reason = "false positive https://github.com/rust-lang/rust-clippy/issues/10545" - )] - let exprs0 = exprs.clone(); - - let build_expr = if self.ret == "varchar" && user_fn.is_writer_style() { - let template_struct = match num_args { - 1 => format_ident!("UnaryBytesExpression"), - 2 => 
format_ident!("BinaryBytesExpression"), - 3 => format_ident!("TernaryBytesExpression"), - 4 => format_ident!("QuaternaryBytesExpression"), - _ => return Err(Error::new(Span::call_site(), "unsupported arguments")), - }; - let args = (0..=num_args).map(|i| format_ident!("x{i}")); - let args1 = args.clone(); - let func = match user_fn.return_type { - ReturnType::T => quote! { Ok(#fn_name(#(#args1),*)) }, - ReturnType::Result => quote! { #fn_name(#(#args1),*) }, - _ => todo!("returning Option is not supported yet"), - }; + .map(|i| format_ident!("{}", types::array_type(&self.args[*i]))); + let arg_types = children_indices.iter().map(|i| { + types::ref_type(&self.args[*i]) + .parse::() + .unwrap() + }); + let annotation: TokenStream2 = match user_fn.core_return_type.as_str() { + // add type annotation for functions that return generic types + "T" | "T1" | "T2" | "T3" => format!(": Option<{}>", types::owned_type(&self.ret)) + .parse() + .unwrap(), + _ => quote! {}, + }; + let ret_array_type = format_ident!("{}", types::array_type(&self.ret)); + let builder_type = format_ident!("{}Builder", types::array_type(&self.ret)); + let prebuilt_arg_type = match &self.prebuild { + Some(s) if optimize_const => s.split("::").next().unwrap().parse().unwrap(), + _ => quote! { () }, + }; + let prebuilt_arg_value = match &self.prebuild { + // example: + // prebuild = "RegexContext::new($1)" + // return = "RegexContext::new(i1)" + Some(s) => s + .replace('$', "i") + .parse() + .expect("invalid prebuild syntax"), + None => quote! { () }, + }; + let prebuild_const = if self.prebuild.is_some() && optimize_const { + let build_general = self.generate_build_scalar_function(user_fn, false)?; + quote! 
{{ + let build_general = #build_general; + #( + // try to evaluate constant for prebuilt arguments + let #prebuilt_inputs = match children[#prebuilt_indices].eval_const() { + Ok(s) => s, + // prebuilt argument is not constant, fallback to general + Err(_) => return build_general(return_type, children), + }; + // get reference to the constant value + let #prebuilt_inputs = match &#prebuilt_inputs { + Some(s) => s.as_scalar_ref_impl().try_into()?, + // the function should always return null if any const argument is null + None => return Ok(Box::new(risingwave_expr::expr::LiteralExpression::new( + return_type, + None, + ))), + }; + )* + #prebuilt_arg_value + }} + } else { + quote! { () } + }; + + // ensure the number of children matches the number of arguments + let check_children = match variadic { + true => quote! { risingwave_expr::ensure!(children.len() >= #num_args); }, + false => quote! { risingwave_expr::ensure!(children.len() == #num_args); }, + }; + + // evaluate variadic arguments in `eval` + let eval_variadic = variadic.then(|| { quote! { - Ok(Box::new(crate::expr::template::#template_struct::<#(#arg_arrays),*, _>::new( - #(#exprs),*, - return_type, - |#(#args),*| #func, - ))) - } - } else if self.args.iter().all(|t| t == "boolean") - && self.ret == "boolean" - && !user_fn.return_type.contains_result() - && self.batch_fn.is_some() - { - let template_struct = match num_args { - 1 => format_ident!("BooleanUnaryExpression"), - 2 => format_ident!("BooleanBinaryExpression"), - _ => return Err(Error::new(Span::call_site(), "unsupported arguments")), - }; - let batch_fn = format_ident!("{}", self.batch_fn.as_ref().unwrap()); - let args = (0..num_args).map(|i| format_ident!("x{i}")); - let args1 = args.clone(); - let func = if user_fn.arg_option && user_fn.return_type == ReturnType::Option { - quote! { #fn_name(#(#args1),*) } - } else if user_fn.arg_option { - quote! 
{ Some(#fn_name(#(#args1),*)) } - } else { - let args2 = args.clone(); - let args3 = args.clone(); - quote! { - match (#(#args1),*) { - (#(Some(#args2)),*) => Some(#fn_name(#(#args3),*)), - _ => None, - } + let mut columns = Vec::with_capacity(self.children.len() - #num_args); + for child in &self.children[#num_args..] { + columns.push(child.eval(input).await?); } - }; + let variadic_input = DataChunk::new(columns, input.visibility().clone()); + } + }); + // evaluate variadic arguments in `eval_row` + let eval_row_variadic = variadic.then(|| { quote! { - Ok(Box::new(crate::expr::template_fast::#template_struct::new( - #(#exprs,)* - #batch_fn, - |#(#args),*| #func, - ))) + let mut row = Vec::with_capacity(self.children.len() - #num_args); + for child in &self.children[#num_args..] { + row.push(child.eval_row(input).await?); + } + let variadic_row = OwnedRow::new(row); } - } else if self.args.len() == 2 && self.ret == "boolean" && user_fn.is_pure() { + }); + + let generic = (self.ret == "boolean" && user_fn.generic == 3).then(|| { + // XXX: for generic compare functions, we need to specify the compatible type let compatible_type = types::ref_type(types::min_compatible_type(&self.args)) .parse::() .unwrap(); - let args = (0..num_args).map(|i| format_ident!("x{i}")); - let args1 = args.clone(); - let generic = if user_fn.generic == 3 { - // XXX: for generic compare functions, we need to specify the compatible type - quote! { ::<_, _, #compatible_type> } - } else { - quote! {} - }; - quote! 
{ - Ok(Box::new(crate::expr::template_fast::CompareExpression::<_, #(#arg_arrays),*>::new( - #(#exprs,)* - |#(#args),*| #fn_name #generic(#(#args1),*), - ))) - } - } else if self.args.iter().all(|t| types::is_primitive(t)) && user_fn.is_pure() { - let template_struct = match num_args { - 0 => format_ident!("NullaryExpression"), - 1 => format_ident!("UnaryExpression"), - 2 => format_ident!("BinaryExpression"), - _ => return Err(Error::new(Span::call_site(), "unsupported arguments")), + quote! { ::<_, _, #compatible_type> } + }); + let prebuilt_arg = match (&self.prebuild, optimize_const) { + // use the prebuilt argument + (Some(_), true) => quote! { &self.prebuilt_arg, }, + // build the argument on site + (Some(_), false) => quote! { &#prebuilt_arg_value, }, + // no prebuilt argument + (None, _) => quote! {}, + }; + let variadic_args = variadic.then(|| quote! { variadic_row, }); + let context = user_fn.context.then(|| quote! { &self.context, }); + let writer = user_fn.write.then(|| quote! { &mut writer, }); + let await_ = user_fn.async_.then(|| quote! { .await }); + // call the user defined function + // inputs: [ Option ] + let mut output = quote! { #fn_name #generic( + #(#non_prebuilt_inputs,)* + #prebuilt_arg + #variadic_args + #context + #writer + ) #await_ }; + // handle error if the function returns `Result` + // wrap a `Some` if the function doesn't return `Option` + output = match user_fn.return_type_kind { + // XXX: we don't support void type yet. return null::int for now. + _ if self.ret == "void" => quote! { { #output; Option::<i32>::None } }, + ReturnTypeKind::T => quote! { Some(#output) }, + ReturnTypeKind::Option => output, + ReturnTypeKind::Result => quote! { Some(#output?) }, + ReturnTypeKind::ResultOption => quote! { #output? }, + }; + // if user function accepts non-option arguments, we assume the function + // returns null on null input, so we need to unwrap the inputs before calling. + if !user_fn.arg_option { + output = quote!
{ + match (#(#inputs,)*) { + (#(Some(#inputs),)*) => #output, + _ => None, + } }; + }; + // now the `output` is: Option + let append_output = match user_fn.write { + true => quote! {{ + let mut writer = builder.writer().begin(); + if #output.is_some() { + writer.finish(); + } else { + drop(writer); + builder.append_null(); + } + }}, + false if user_fn.core_return_type == "impl AsRef < [u8] >" => quote! { + builder.append(#output.as_ref().map(|s| s.as_ref())); + }, + false => quote! { + let output #annotation = #output; + builder.append(output.as_ref().map(|s| s.as_scalar_ref())); + }, + }; + // the output expression in `eval_row` + let row_output = match user_fn.write { + true => quote! {{ + let mut writer = String::new(); + #output.map(|_| writer.into()) + }}, + false if user_fn.core_return_type == "impl AsRef < [u8] >" => quote! { + #output.map(|s| s.as_ref().into()) + }, + false => quote! {{ + let output #annotation = #output; + output.map(|s| s.into()) + }}, + }; + // the main body in `eval` + let eval = if let Some(batch_fn) = &self.batch_fn { + assert!( + !variadic, + "customized batch function is not supported for variadic functions" + ); + // user defined batch function + let fn_name = format_ident!("{}", batch_fn); quote! 
{ - Ok(Box::new(crate::expr::template_fast::#template_struct::<_, #(#arg_types,)* #ret_type>::new( - #(#exprs,)* - return_type, - #fn_name, - ))) + let c = #fn_name(#(#arrays),*); + Ok(Arc::new(c.into())) } - } else if user_fn.arg_option || user_fn.return_type.contains_option() { - let template_struct = match num_args { - 1 => format_ident!("UnaryNullableExpression"), - 2 => format_ident!("BinaryNullableExpression"), - 3 => format_ident!("TernaryNullableExpression"), - _ => return Err(Error::new(Span::call_site(), "unsupported arguments")), - }; - let args = (0..num_args).map(|i| format_ident!("x{i}")); - let args1 = args.clone(); - let generic = if user_fn.generic == 3 { - // XXX: for generic compare functions, we need to specify the compatible type - let compatible_type = types::ref_type(types::min_compatible_type(&self.args)) - .parse::<TokenStream2>() - .unwrap(); - quote! { ::<_, _, #compatible_type> } - } else { - quote! {} - }; - let mut func = quote! { #fn_name #generic(#(#args1),*) }; - func = match user_fn.return_type { - ReturnType::T => quote! { Ok(Some(#func)) }, - ReturnType::Option => quote! { Ok(#func) }, - ReturnType::Result => quote! { #func.map(Some) }, - ReturnType::ResultOption => quote! { #func }, - }; - if !user_fn.arg_option { - let args2 = args.clone(); - let args3 = args.clone(); - func = quote! { - match (#(#args2),*) { - (#(Some(#args3)),*) => #func, - _ => Ok(None), - } - }; - }; - quote! { - Ok(Box::new(crate::expr::template::#template_struct::<#(#arg_arrays,)* #ret_array, _>::new( - #(#exprs,)* - return_type, - |#(#args),*| #func, - ))) + } else if (types::is_primitive(&self.ret) || self.ret == "boolean") + && user_fn.is_pure() + && !variadic + && self.prebuild.is_none() + { + // SIMD optimization for primitive types + match self.args.len() { + 0 => quote! { + let c = #ret_array_type::from_iter_bitmap( + std::iter::repeat_with(|| #fn_name()).take(input.capacity()), + Bitmap::ones(input.capacity()), + ); + Ok(Arc::new(c.into())) + }, + 1 => quote!
{ + let c = #ret_array_type::from_iter_bitmap( + a0.raw_iter().map(|a| #fn_name(a)), + a0.null_bitmap().clone() + ); + Ok(Arc::new(c.into())) + }, + 2 => quote! { + // allow using `zip` for performance + #[allow(clippy::disallowed_methods)] + let c = #ret_array_type::from_iter_bitmap( + a0.raw_iter() + .zip(a1.raw_iter()) + .map(|(a, b)| #fn_name #generic(a, b)), + a0.null_bitmap() & a1.null_bitmap(), + ); + Ok(Arc::new(c.into())) + }, + n => todo!("SIMD optimization for {n} arguments"), } } else { - let template_struct = match num_args { - 0 => format_ident!("NullaryExpression"), - 1 => format_ident!("UnaryExpression"), - 2 => format_ident!("BinaryExpression"), - 3 => format_ident!("TernaryExpression"), - _ => return Err(Error::new(Span::call_site(), "unsupported arguments")), - }; - let args = (0..num_args).map(|i| format_ident!("x{i}")); - let args1 = args.clone(); - let func = match user_fn.return_type { - ReturnType::T => quote! { Ok(#fn_name(#(#args1),*)) }, - ReturnType::Result => quote! { #fn_name(#(#args1),*) }, - _ => panic!("return type should not contain Option"), + // no optimization + let array_zip = match children_indices.len() { + 0 => quote! { std::iter::repeat(()).take(input.capacity()) }, + _ => quote! { multizip((#(#arrays.iter(),)*)) }, }; + let let_variadic = variadic.then(|| { + quote! { + let variadic_row = variadic_input.row_at_unchecked_vis(i); + } + }); quote! 
{ - Ok(Box::new(crate::expr::template::#template_struct::<#(#arg_arrays,)* #ret_array, _>::new( - #(#exprs,)* - return_type, - |#(#args),*| #func, - ))) + let mut builder = #builder_type::with_type(input.capacity(), self.context.return_type.clone()); + + if input.is_compacted() { + for (i, (#(#inputs,)*)) in #array_zip.enumerate() { + #let_variadic + #append_output + } + } else { + // allow using `zip` for performance + #[allow(clippy::disallowed_methods)] + for (i, ((#(#inputs,)*), visible)) in #array_zip.zip(input.visibility().iter()).enumerate() { + if !visible { + builder.append_null(); + continue; + } + #let_variadic + #append_output + } + } + Ok(Arc::new(builder.finish().into())) } }; + Ok(quote! { - |return_type, children| { + |return_type: DataType, children: Vec| + -> risingwave_expr::Result + { + use std::sync::Arc; use risingwave_common::array::*; use risingwave_common::types::*; + use risingwave_common::buffer::Bitmap; + use risingwave_common::row::OwnedRow; + use risingwave_common::util::iter_util::ZipEqFast; - crate::ensure!(children.len() == #num_args); - let mut iter = children.into_iter(); - #(let #exprs0 = iter.next().unwrap();)* + use risingwave_expr::expr::{Context, BoxedExpression}; + use risingwave_expr::Result; + use risingwave_expr::codegen::*; + + #check_children + let prebuilt_arg = #prebuild_const; + let context = Context { + return_type, + arg_types: children.iter().map(|c| c.return_type()).collect(), + }; + + #[derive(Debug)] + struct #struct_name { + context: Context, + children: Vec, + prebuilt_arg: #prebuilt_arg_type, + } + #[async_trait] + impl risingwave_expr::expr::Expression for #struct_name { + fn return_type(&self) -> DataType { + self.context.return_type.clone() + } + async fn eval(&self, input: &DataChunk) -> Result { + #( + let #array_refs = self.children[#children_indices].eval(input).await?; + let #arrays: &#arg_arrays = #array_refs.as_ref().into(); + )* + #eval_variadic + #eval + } + async fn eval_row(&self, input: 
&OwnedRow) -> Result { + #( + let #datums = self.children[#children_indices].eval_row(input).await?; + let #inputs: Option<#arg_types> = #datums.as_ref().map(|s| s.as_scalar_ref_impl().try_into().unwrap()); + )* + #eval_row_variadic + Ok(#row_output) + } + } - #build_expr + Ok(Box::new(#struct_name { + context, + children, + prebuilt_arg, + })) } }) } @@ -282,47 +516,72 @@ impl FunctionAttr { /// Generate a descriptor of the aggregate function. /// /// The types of arguments and return value should not contain wildcard. - pub fn generate_agg_descriptor( + /// `user_fn` could be either `fn` or `impl`. + /// If `build_fn` is true, `user_fn` must be a `fn` that builds the aggregate function. + pub fn generate_aggregate_descriptor( &self, - user_fn: &UserFunctionAttr, + user_fn: &AggregateFnOrImpl, build_fn: bool, ) -> Result { let name = self.name.clone(); let mut args = Vec::with_capacity(self.args.len()); for ty in &self.args { - args.push(data_type_name(ty)); + args.push(sig_data_type(ty)); } - let ret = data_type_name(&self.ret); + let ret = sig_data_type(&self.ret); + let state_type = match &self.state { + Some(ty) if ty != "ref" => { + let ty = data_type(ty); + quote! { Some(#ty) } + } + _ => quote! { None }, + }; + let append_only = match build_fn { + false => !user_fn.has_retract(), + true => self.append_only, + }; let pb_type = format_ident!("{}", utils::to_camel_case(&name)); - let ctor_name = format_ident!("{}", self.ident_name()); - let descriptor_type = quote! { crate::sig::agg::AggFuncSig }; + let ctor_name = match append_only { + false => format_ident!("{}", self.ident_name()), + true => format_ident!("{}_append_only", self.ident_name()), + }; let build_fn = if build_fn { - let name = format_ident!("{}", user_fn.name); + let name = format_ident!("{}", user_fn.as_fn().name); quote! { #name } } else { self.generate_agg_build_fn(user_fn)? }; + let type_infer_fn = self.generate_type_infer_fn()?; + let deprecated = self.deprecated; + Ok(quote! 
{ - #[ctor::ctor] + #[risingwave_expr::codegen::ctor] fn #ctor_name() { use risingwave_common::types::{DataType, DataTypeName}; - unsafe { crate::sig::agg::_register(#descriptor_type { - func: crate::agg::AggKind::#pb_type, - inputs_type: &[#(#args),*], + use risingwave_expr::sig::{_register, FuncSign, SigDataType, FuncBuilder}; + + unsafe { _register(FuncSign { + name: risingwave_expr::aggregate::AggKind::#pb_type.into(), + inputs_type: vec![#(#args),*], + variadic: false, ret_type: #ret, - build: #build_fn, + build: FuncBuilder::Aggregate(#build_fn), + type_infer: #type_infer_fn, + state_type: #state_type, + append_only: #append_only, + deprecated: #deprecated, }) }; } }) } /// Generate build function for aggregate function. - fn generate_agg_build_fn(&self, user_fn: &UserFunctionAttr) -> Result { + fn generate_agg_build_fn(&self, user_fn: &AggregateFnOrImpl) -> Result { let state_type: TokenStream2 = match &self.state { Some(state) if state == "ref" => types::ref_type(&self.ret).parse().unwrap(), - Some(state) if state != "ref" => state.parse().unwrap(), + Some(state) if state != "ref" => types::owned_type(state).parse().unwrap(), _ => types::owned_type(&self.ret).parse().unwrap(), }; let let_arrays = self @@ -331,11 +590,9 @@ impl FunctionAttr { .enumerate() .map(|(i, arg)| { let array = format_ident!("a{i}"); - let variant: TokenStream2 = types::variant(arg).parse().unwrap(); + let array_type: TokenStream2 = types::array_type(arg).parse().unwrap(); quote! { - let ArrayImpl::#variant(#array) = &**input.column_at(#i) else { - bail!("input type mismatch. expect: {}", stringify!(#variant)); - }; + let #array: &#array_type = input.column_at(#i).as_ref().into(); } }) .collect_vec(); @@ -364,28 +621,49 @@ impl FunctionAttr { } } }); - let fn_name = format_ident!("{}", user_fn.name); let args = (0..self.args.len()).map(|i| format_ident!("v{i}")); let args = quote! { #(#args,)* }; - let retract = match user_fn.retract { - true => quote! 
{ matches!(op, Op::Delete | Op::UpdateDelete) }, - false => quote! {}, - }; - let check_retract = match user_fn.retract { - true => quote! {}, - false => { - let msg = format!("aggregate function {} only supports append", self.name); - quote! { assert_eq!(op, Op::Insert, #msg); } + let panic_on_retract = { + let msg = format!( + "attempt to retract on aggregate function {}, but it is append-only", + self.name + ); + quote! { assert_eq!(op, Op::Insert, #msg); } + }; + let mut next_state = match user_fn { + AggregateFnOrImpl::Fn(f) => { + let fn_name = format_ident!("{}", f.name); + match f.retract { + true => { + quote! { #fn_name(state, #args matches!(op, Op::Delete | Op::UpdateDelete)) } + } + false => quote! {{ + #panic_on_retract + #fn_name(state, #args) + }}, + } + } + AggregateFnOrImpl::Impl(i) => { + let retract = match i.retract { + Some(_) => quote! { self.function.retract(state, #args) }, + None => panic_on_retract, + }; + quote! { + if matches!(op, Op::Delete | Op::UpdateDelete) { + #retract + } else { + self.function.accumulate(state, #args) + } + } } }; - let mut next_state = quote! { #fn_name(state, #args #retract) }; - next_state = match user_fn.return_type { - ReturnType::T => quote! { Some(#next_state) }, - ReturnType::Option => next_state, - ReturnType::Result => quote! { Some(#next_state?) }, - ReturnType::ResultOption => quote! { #next_state? }, + next_state = match user_fn.accumulate().return_type_kind { + ReturnTypeKind::T => quote! { Some(#next_state) }, + ReturnTypeKind::Option => next_state, + ReturnTypeKind::Result => quote! { Some(#next_state?) }, + ReturnTypeKind::ResultOption => quote! { #next_state? }, }; - if !user_fn.arg_option { + if !user_fn.accumulate().arg_option { match self.args.len() { 0 => { next_state = quote! { @@ -396,9 +674,23 @@ impl FunctionAttr { }; } 1 => { - let first_state = match &self.init_state { - Some(_) => quote! { unreachable!() }, - _ => quote! 
{ Some(v0.into()) }, + let first_state = if self.init_state.is_some() { + // for count, the state will never be None + quote! { unreachable!() } + } else if let Some(s) = &self.state && s == "ref" { + // for min/max/first/last, the state is the first value + quote! { Some(v0) } + } else if let AggregateFnOrImpl::Impl(impl_) = user_fn && impl_.create_state.is_some() { + // use user-defined create_state function + quote! {{ + let state = self.function.create_state(); + #next_state + }} + } else { + quote! {{ + let state = #state_type::default(); + #next_state + }} }; next_state = quote! { match (state, v0) { @@ -411,6 +703,40 @@ impl FunctionAttr { _ => todo!("multiple arguments are not supported for non-option function"), } } + let get_result = match user_fn { + AggregateFnOrImpl::Impl(impl_) if impl_.finalize.is_some() => { + quote! { + let state = match state { + Some(s) => s.as_scalar_ref_impl().try_into().unwrap(), + None => return Ok(None), + }; + Ok(Some(self.function.finalize(state).into())) + } + } + _ => quote! { Ok(state.clone()) }, + }; + let function_field = match user_fn { + AggregateFnOrImpl::Fn(_) => quote! {}, + AggregateFnOrImpl::Impl(i) => { + let struct_name = format_ident!("{}", i.struct_name); + let generic = self.generic.as_ref().map(|g| { + let g = format_ident!("{g}"); + quote! { <#g> } + }); + quote! { function: #struct_name #generic, } + } + }; + let function_new = match user_fn { + AggregateFnOrImpl::Fn(_) => quote! {}, + AggregateFnOrImpl::Impl(i) => { + let struct_name = format_ident!("{}", i.struct_name); + let generic = self.generic.as_ref().map(|g| { + let g = format_ident!("{g}"); + quote! { ::<#g> } + }); + quote! { function: #struct_name #generic :: default(), } + } + }; Ok(quote! 
{ |agg| { @@ -422,16 +748,18 @@ impl FunctionAttr { use risingwave_common::buffer::Bitmap; use risingwave_common::estimate_size::EstimateSize; - use crate::Result; - use crate::agg::AggregateState; + use risingwave_expr::Result; + use risingwave_expr::aggregate::AggregateState; + use risingwave_expr::codegen::async_trait; #[derive(Clone)] struct Agg { return_type: DataType, + #function_field } - #[async_trait::async_trait] - impl crate::agg::AggregateFunction for Agg { + #[async_trait] + impl risingwave_expr::aggregate::AggregateFunction for Agg { fn return_type(&self) -> DataType { self.return_type.clone() } @@ -442,23 +770,10 @@ impl FunctionAttr { #(#let_arrays)* let state0 = state0.as_datum_mut(); let mut state: Option<#state_type> = #let_state; - match input.vis() { - Vis::Bitmap(bitmap) => { - for row_id in bitmap.iter_ones() { - let op = unsafe { *input.ops().get_unchecked(row_id) }; - #check_retract - #(#let_values)* - state = #next_state; - } - } - Vis::Compact(_) => { - for row_id in 0..input.capacity() { - let op = unsafe { *input.ops().get_unchecked(row_id) }; - #check_retract - #(#let_values)* - state = #next_state; - } - } + for row_id in input.visibility().iter_ones() { + let op = unsafe { *input.ops().get_unchecked(row_id) }; + #(#let_values)* + state = #next_state; } *state0 = #assign_state; Ok(()) @@ -469,27 +784,22 @@ impl FunctionAttr { #(#let_arrays)* let state0 = state0.as_datum_mut(); let mut state: Option<#state_type> = #let_state; - match input.vis() { - Vis::Bitmap(bitmap) => { - for row_id in bitmap.iter_ones() { - if row_id < range.start { - continue; - } else if row_id >= range.end { - break; - } - let op = unsafe { *input.ops().get_unchecked(row_id) }; - #check_retract - #(#let_values)* - state = #next_state; - } + if input.is_compacted() { + for row_id in range { + let op = unsafe { *input.ops().get_unchecked(row_id) }; + #(#let_values)* + state = #next_state; } - Vis::Compact(_) => { - for row_id in range { - let op = unsafe { 
*input.ops().get_unchecked(row_id) }; - #check_retract - #(#let_values)* - state = #next_state; + } else { + for row_id in input.visibility().iter_ones() { + if row_id < range.start { + continue; + } else if row_id >= range.end { + break; } + let op = unsafe { *input.ops().get_unchecked(row_id) }; + #(#let_values)* + state = #next_state; } } *state0 = #assign_state; @@ -497,12 +807,14 @@ impl FunctionAttr { } async fn get_result(&self, state: &AggregateState) -> Result { - Ok(state.as_datum().clone()) + let state = state.as_datum(); + #get_result } } Ok(Box::new(Agg { return_type: agg.return_type.clone(), + #function_new })) } }) @@ -519,41 +831,37 @@ impl FunctionAttr { let name = self.name.clone(); let mut args = Vec::with_capacity(self.args.len()); for ty in &self.args { - args.push(data_type_name(ty)); + args.push(sig_data_type(ty)); } - let ret = data_type_name(&self.ret); + let ret = sig_data_type(&self.ret); let pb_type = format_ident!("{}", utils::to_camel_case(&name)); let ctor_name = format_ident!("{}", self.ident_name()); - let descriptor_type = quote! { crate::sig::table_function::FuncSign }; let build_fn = if build_fn { let name = format_ident!("{}", user_fn.name); quote! { #name } } else { self.generate_build_table_function(user_fn)? }; - let type_infer_fn = if let Some(func) = &self.type_infer { - func.parse().unwrap() - } else { - if matches!(self.ret.as_str(), "any" | "list" | "struct") { - return Err(Error::new( - Span::call_site(), - format!("type inference function is required for {}", self.ret), - )); - } - let ty = data_type(&self.ret); - quote! { |_| Ok(#ty) } - }; + let type_infer_fn = self.generate_type_infer_fn()?; + let deprecated = self.deprecated; + Ok(quote! 
{ - #[ctor::ctor] + #[risingwave_expr::codegen::ctor] fn #ctor_name() { use risingwave_common::types::{DataType, DataTypeName}; - unsafe { crate::sig::table_function::_register(#descriptor_type { - func: risingwave_pb::expr::table_function::Type::#pb_type, - inputs_type: &[#(#args),*], + use risingwave_expr::sig::{_register, FuncSign, SigDataType, FuncBuilder}; + + unsafe { _register(FuncSign { + name: risingwave_pb::expr::table_function::Type::#pb_type.into(), + inputs_type: vec![#(#args),*], + variadic: false, ret_type: #ret, - build: #build_fn, + build: FuncBuilder::Table(#build_fn), type_infer: #type_infer_fn, + deprecated: #deprecated, + state_type: None, + append_only: false, }) }; } }) @@ -563,7 +871,7 @@ impl FunctionAttr { let num_args = self.args.len(); let return_types = output_types(&self.ret); let fn_name = format_ident!("{}", user_fn.name); - let struct_name = format_ident!("{}", self.ident_name()); + let struct_name = format_ident!("{}", utils::to_camel_case(&self.ident_name())); let arg_ids = (0..num_args) .filter(|i| match &self.prebuild { Some(s) => !s.contains(&format!("${i}")), @@ -601,50 +909,61 @@ impl FunctionAttr { .map(|i| quote! { self.return_type.as_struct().types().nth(#i).unwrap().clone() }) .collect() }; + #[allow(clippy::disallowed_methods)] + let optioned_outputs = user_fn + .core_return_type + .split(',') + .map(|t| t.contains("Option")) + // example: "(Option<&str>, i32)" => [true, false] + .zip(&outputs) + .map(|(optional, o)| match optional { + false => quote! { Some(#o.as_scalar_ref()) }, + true => quote! { #o.map(|o| o.as_scalar_ref()) }, + }) + .collect_vec(); let build_value_array = if return_types.len() == 1 { quote! { let [value_array] = value_arrays; } } else { quote! 
{ - let bitmap = value_arrays[0].null_bitmap().clone(); let value_array = StructArray::new( self.return_type.as_struct().clone(), value_arrays.to_vec(), - bitmap, + Bitmap::ones(len), ).into_ref(); } }; - let const_arg = match &self.prebuild { - Some(_) => quote! { &self.const_arg }, + let prebuilt_arg = match &self.prebuild { + Some(_) => quote! { &self.prebuilt_arg }, None => quote! {}, }; - let const_arg_type = match &self.prebuild { + let prebuilt_arg_type = match &self.prebuild { Some(s) => s.split("::").next().unwrap().parse().unwrap(), None => quote! { () }, }; - let const_arg_value = match &self.prebuild { + let prebuilt_arg_value = match &self.prebuild { Some(s) => s .replace('$', "child") .parse() .expect("invalid prebuild syntax"), None => quote! { () }, }; - let iter = match user_fn.return_type { - ReturnType::T => quote! { iter }, - ReturnType::Option => quote! { iter.flatten() }, - ReturnType::Result => quote! { iter? }, - ReturnType::ResultOption => quote! { value?.flatten() }, + let iter = match user_fn.return_type_kind { + ReturnTypeKind::T => quote! { iter }, + ReturnTypeKind::Option => quote! { iter.flatten() }, + ReturnTypeKind::Result => quote! { iter? }, + ReturnTypeKind::ResultOption => quote! { value?.flatten() }, }; - let iterator_item_type = user_fn.iterator_item_type.clone().ok_or_else(|| { + let iterator_item_type = user_fn.iterator_item_kind.clone().ok_or_else(|| { Error::new( user_fn.return_type_span, "expect `impl Iterator` in return type", ) })?; let output = match iterator_item_type { - ReturnType::T => quote! { Some(output) }, - ReturnType::Option => quote! { output }, - ReturnType::Result => quote! { Some(output?) }, - ReturnType::ResultOption => quote! { output? }, + ReturnTypeKind::T => quote! { Some(output) }, + ReturnTypeKind::Option => quote! { output }, + ReturnTypeKind::Result => quote! { Some(output?) }, + ReturnTypeKind::ResultOption => quote! { output? }, }; Ok(quote! 
{ @@ -653,23 +972,31 @@ impl FunctionAttr { use risingwave_common::types::*; use risingwave_common::buffer::Bitmap; use risingwave_common::util::iter_util::ZipEqFast; - use itertools::multizip; + use risingwave_expr::expr::BoxedExpression; + use risingwave_expr::{Result, ExprError}; + use risingwave_expr::codegen::*; - crate::ensure!(children.len() == #num_args); + risingwave_expr::ensure!(children.len() == #num_args); let mut iter = children.into_iter(); #(let #all_child = iter.next().unwrap();)* - #(let #const_child = #const_child.eval_const()?;)* + #( + let #const_child = #const_child.eval_const()?; + let #const_child = match &#const_child { + Some(s) => s.as_scalar_ref_impl().try_into()?, + // the function should always return empty if any const argument is null + None => return Ok(risingwave_expr::table_function::empty(return_type)), + }; + )* #[derive(Debug)] - #[allow(non_camel_case_types)] struct #struct_name { return_type: DataType, chunk_size: usize, #(#child: BoxedExpression,)* - const_arg: #const_arg_type, + prebuilt_arg: #prebuilt_arg_type, } - #[async_trait::async_trait] - impl crate::table_function::TableFunction for #struct_name { + #[async_trait] + impl risingwave_expr::table_function::TableFunction for #struct_name { fn return_type(&self) -> DataType { self.return_type.clone() } @@ -681,24 +1008,25 @@ impl FunctionAttr { #[try_stream(boxed, ok = DataChunk, error = ExprError)] async fn eval_inner<'a>(&'a self, input: &'a DataChunk) { #( - let #array_refs = self.#child.eval_checked(input).await?; + let #array_refs = self.#child.eval(input).await?; let #arrays: &#arg_arrays = #array_refs.as_ref().into(); )* let mut index_builder = I32ArrayBuilder::new(self.chunk_size); #(let mut #builders = #builder_types::with_type(self.chunk_size, #return_types);)* - for (i, (row, visible)) in multizip((#(#arrays.iter(),)*)).zip_eq_fast(input.vis().iter()).enumerate() { + for (i, (row, visible)) in 
multizip((#(#arrays.iter(),)*)).zip_eq_fast(input.visibility().iter()).enumerate() { if let (#(Some(#inputs),)*) = row && visible { - let iter = #fn_name(#(#inputs,)* #const_arg); + let iter = #fn_name(#(#inputs,)* #prebuilt_arg); for output in #iter { index_builder.append(Some(i as i32)); match #output { - Some((#(#outputs),*)) => { #(#builders.append(Some(#outputs.as_scalar_ref()));)* } + Some((#(#outputs),*)) => { #(#builders.append(#optioned_outputs);)* } None => { #(#builders.append_null();)* } } if index_builder.len() == self.chunk_size { + let len = index_builder.len(); let index_array = std::mem::replace(&mut index_builder, I32ArrayBuilder::new(self.chunk_size)).finish().into_ref(); let value_arrays = [#(std::mem::replace(&mut #builders, #builder_types::with_type(self.chunk_size, #return_types)).finish().into_ref()),*]; #build_value_array @@ -722,16 +1050,24 @@ impl FunctionAttr { return_type, chunk_size, #(#child,)* - const_arg: #const_arg_value, + prebuilt_arg: #prebuilt_arg_value, })) } }) } } -fn data_type_name(ty: &str) -> TokenStream2 { - let variant = format_ident!("{}", types::data_type(ty)); - quote! { DataTypeName::#variant } +fn sig_data_type(ty: &str) -> TokenStream2 { + match ty { + "any" => quote! { SigDataType::Any }, + "anyarray" => quote! { SigDataType::AnyArray }, + "struct" => quote! { SigDataType::AnyStruct }, + _ if ty.starts_with("struct") && ty.contains("any") => quote! { SigDataType::AnyStruct }, + _ => { + let datatype = data_type(ty); + quote! { SigDataType::Exact(#datatype) } + } + } } fn data_type(ty: &str) -> TokenStream2 { @@ -749,7 +1085,7 @@ fn data_type(ty: &str) -> TokenStream2 { /// Extract multiple output types. 
/// /// ```ignore -/// output_types("int32") -> ["int32"] +/// output_types("int4") -> ["int4"] /// output_types("struct") -> ["varchar", "jsonb"] /// ``` fn output_types(ty: &str) -> Vec<&str> { diff --git a/src/expr/macro/src/lib.rs b/src/expr/macro/src/lib.rs index aa514c134d750..363fc958b557d 100644 --- a/src/expr/macro/src/lib.rs +++ b/src/expr/macro/src/lib.rs @@ -15,10 +15,14 @@ #![feature(lint_reasons)] #![feature(let_chains)] +use context::DefineContextAttr; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; -use syn::{Error, Result}; +use syn::{Error, ItemFn, Result}; +use crate::context::{generate_captured_function, CaptureContextAttr}; + +mod context; mod gen; mod parse; mod types; @@ -30,17 +34,20 @@ mod utils; /// /// # Table of Contents /// -/// - [Function Signature](#function-signature) +/// - [SQL Function Signature](#sql-function-signature) /// - [Multiple Function Definitions](#multiple-function-definitions) /// - [Type Expansion](#type-expansion) /// - [Automatic Type Inference](#automatic-type-inference) /// - [Custom Type Inference Function](#custom-type-inference-function) -/// - [Rust Function Requirements](#rust-function-requirements) +/// - [Rust Function Signature](#rust-function-signature) /// - [Nullable Arguments](#nullable-arguments) /// - [Return Value](#return-value) +/// - [Variadic Function](#variadic-function) /// - [Optimization](#optimization) /// - [Functions Returning Strings](#functions-returning-strings) /// - [Preprocessing Constant Arguments](#preprocessing-constant-arguments) +/// - [Context](#context) +/// - [Async Function](#async-function) /// - [Table Function](#table-function) /// - [Registration and Invocation](#registration-and-invocation) /// - [Appendix: Type Matrix](#appendix-type-matrix) @@ -54,24 +61,29 @@ mod utils; /// } /// ``` /// -/// # Function Signature +/// # SQL Function Signature /// /// Each function must have a signature, specified in the `function("...")` part of the 
macro /// invocation. The signature follows this pattern: /// /// ```text -/// name([arg_types],*) -> [setof] return_type +/// name ( [arg_types],* [...] ) [ -> [setof] return_type ] /// ``` /// -/// Where `name` is the function name, which must match the function name defined in `prost`. +/// Where `name` is the function name in `snake_case`, which must match the function name defined +/// in `prost`. /// -/// The allowed data types are listed in the `name` column of the appendix's [type matrix]. -/// Wildcards or `auto` can also be used, as explained below. +/// `arg_types` is a comma-separated list of argument types. The allowed data types are listed in +/// in the `name` column of the appendix's [type matrix]. Wildcards or `auto` can also be used, as +/// explained below. If the function is variadic, the last argument can be denoted as `...`. /// /// When `setof` appears before the return type, this indicates that the function is a set-returning /// function (table function), meaning it can return multiple values instead of just one. For more /// details, see the section on table functions. /// +/// If no return type is specified, the function returns `void`. However, the void type is not +/// supported in our type system, so it now returns a null value of type int. +/// /// ## Multiple Function Definitions /// /// Multiple `#[function]` macros can be applied to a single generic Rust function to define @@ -146,14 +158,14 @@ mod utils; /// /// ```ignore /// #[function( -/// "unnest(list) -> setof any", +/// "unnest(anyarray) -> setof any", /// type_infer = "|args| Ok(args[0].unnest_list())" /// )] /// ``` /// /// This type inference function will be invoked at the frontend. /// -/// # Rust Function Requirements +/// # Rust Function Signature /// /// The `#[function]` macro can handle various types of Rust functions. 
/// @@ -164,7 +176,7 @@ mod utils; /// For instance: /// /// ```ignore -/// #[function("trim_array(list, int32) -> list")] +/// #[function("trim_array(anyarray, int32) -> anyarray")] /// fn trim_array(array: ListRef<'_>, n: i32) -> ListValue {...} /// ``` /// @@ -174,7 +186,7 @@ mod utils; /// to be considered, the `Option` type can be used: /// /// ```ignore -/// #[function("trim_array(list, int32) -> list")] +/// #[function("trim_array(anyarray, int32) -> anyarray")] /// fn trim_array(array: Option>, n: Option) -> ListValue {...} /// ``` /// @@ -196,6 +208,23 @@ mod utils; /// matrix]) and do not contain any Option or Result, the `#[function]` macro will automatically /// generate SIMD vectorized execution code. /// +/// Therefore, try to avoid returning `Option` and `Result` whenever possible. +/// +/// ## Variadic Function +/// +/// Variadic functions accept a `impl Row` input to represent tailing arguments. +/// For example: +/// +/// ```ignore +/// #[function("concat_ws(varchar, ...) -> varchar")] +/// fn concat_ws(sep: &str, vals: impl Row) -> Option> { +/// let mut string_iter = vals.iter().flatten(); +/// // ... +/// } +/// ``` +/// +/// See `risingwave_common::row::Row` for more details. 
+/// /// ## Functions Returning Strings /// /// For functions that return varchar types, you can also use the writer style function signature to @@ -203,7 +232,7 @@ mod utils; /// /// ```ignore /// #[function("trim(varchar) -> varchar")] -/// pub fn trim(s: &str, writer: &mut dyn Write) { +/// fn trim(s: &str, writer: &mut impl Write) { /// writer.write_str(s.trim()).unwrap(); /// } /// ``` @@ -212,12 +241,26 @@ mod utils; /// /// ```ignore /// #[function("trim(varchar) -> varchar")] -/// pub fn trim(s: &str, writer: &mut dyn Write) -> Result<()> { +/// fn trim(s: &str, writer: &mut impl Write) -> Result<()> { /// writer.write_str(s.trim()).unwrap(); /// Ok(()) /// } /// ``` /// +/// If null values may be returned, then the return value should be `Option<()>`: +/// +/// ```ignore +/// #[function("trim(varchar) -> varchar")] +/// fn trim(s: &str, writer: &mut impl Write) -> Option<()> { +/// if s.is_empty() { +/// None +/// } else { +/// writer.write_str(s.trim()).unwrap(); +/// Some(()) +/// } +/// } +/// ``` +/// /// ## Preprocessing Constant Arguments /// /// When some input arguments of the function are constants, they can be preprocessed to avoid @@ -237,12 +280,41 @@ mod utils; /// /// The `prebuild` argument can be specified, and its value is a Rust expression used to construct a /// new variable from the input arguments of the function. Here `$1`, `$2` represent the second and -/// third arguments of the function (indexed from 0), and their types are `Datum`. In the Rust +/// third arguments of the function (indexed from 0), and their types are `&str`. In the Rust /// function signature, these positions of parameters will be omitted, replaced by an extra new /// variable at the end. /// -/// TODO: This macro will support both variable and constant inputs, and automatically optimize the -/// preprocessing of constants. Currently, it only supports constant inputs. +/// This macro generates two versions of the function. 
If all the input parameters that `prebuild` +/// depends on are constants, it will precompute them during the build function. Otherwise, it will +/// compute them for each input row during evaluation. This way, we support both constant and variable +/// inputs while optimizing performance for constant inputs. +/// +/// ## Context +/// +/// If a function needs to obtain type information at runtime, you can add an `&Context` parameter to +/// the function signature. For example: +/// +/// ```ignore +/// #[function("foo(int32) -> int64")] +/// fn foo(a: i32, ctx: &Context) -> i64 { +/// assert_eq!(ctx.arg_types[0], DataType::Int32); +/// assert_eq!(ctx.return_type, DataType::Int64); +/// // ... +/// } +/// ``` +/// +/// ## Async Function +/// +/// Functions can be asynchronous. +/// +/// ```ignore +/// #[function("pg_sleep(float64)")] +/// async fn pg_sleep(second: F64) { +/// tokio::time::sleep(Duration::from_secs_f64(second.0)).await; +/// } +/// ``` +/// +/// Asynchronous functions will be evaluated on rows sequentially. /// /// # Table Function /// @@ -300,12 +372,12 @@ mod utils; /// | name | SQL type | owned type | reference type | primitive? 
| /// | ----------- | ------------------ | ------------- | ------------------ | ---------- | /// | boolean | `boolean` | `bool` | `bool` | yes | -/// | int16 | `smallint` | `i16` | `i16` | yes | -/// | int32 | `integer` | `i32` | `i32` | yes | -/// | int64 | `bigint` | `i64` | `i64` | yes | +/// | int2 | `smallint` | `i16` | `i16` | yes | +/// | int4 | `integer` | `i32` | `i32` | yes | +/// | int8 | `bigint` | `i64` | `i64` | yes | /// | int256 | `rw_int256` | `Int256` | `Int256Ref<'_>` | no | -/// | float32 | `real` | `F32` | `F32` | yes | -/// | float64 | `double precision` | `F64` | `F64` | yes | +/// | float4 | `real` | `F32` | `F32` | yes | +/// | float8 | `double precision` | `F64` | `F64` | yes | /// | decimal | `numeric` | `Decimal` | `Decimal` | yes | /// | serial | `serial` | `Serial` | `Serial` | yes | /// | date | `date` | `Date` | `Date` | yes | @@ -322,7 +394,7 @@ mod utils; /// /// | name | SQL type | owned type | reference type | /// | ---------------------- | -------------------- | ------------- | ------------------ | -/// | list | `any[]` | `ListValue` | `ListRef<'_>` | +/// | anyarray | `any[]` | `ListValue` | `ListRef<'_>` | /// | struct | `record` | `StructValue` | `StructRef<'_>` | /// | T[^1][] | `T[]` | `ListValue` | `ListRef<'_>` | /// | struct | `struct` | `(T, ..)` | `(&T, ..)` | @@ -338,7 +410,7 @@ pub fn function(attr: TokenStream, item: TokenStream) -> TokenStream { let mut tokens: TokenStream2 = item.into(); for attr in fn_attr.expand() { - tokens.extend(attr.generate_descriptor(&user_fn, false)?); + tokens.extend(attr.generate_function_descriptor(&user_fn, false)?); } Ok(tokens) } @@ -356,7 +428,7 @@ pub fn build_function(attr: TokenStream, item: TokenStream) -> TokenStream { let mut tokens: TokenStream2 = item.into(); for attr in fn_attr.expand() { - tokens.extend(attr.generate_descriptor(&user_fn, true)?); + tokens.extend(attr.generate_function_descriptor(&user_fn, true)?); } Ok(tokens) } @@ -370,11 +442,11 @@ pub fn 
build_function(attr: TokenStream, item: TokenStream) -> TokenStream { pub fn aggregate(attr: TokenStream, item: TokenStream) -> TokenStream { fn inner(attr: TokenStream, item: TokenStream) -> Result { let fn_attr: FunctionAttr = syn::parse(attr)?; - let user_fn: UserFunctionAttr = syn::parse(item.clone())?; + let user_fn: AggregateFnOrImpl = syn::parse(item.clone())?; let mut tokens: TokenStream2 = item.into(); for attr in fn_attr.expand() { - tokens.extend(attr.generate_agg_descriptor(&user_fn, false)?); + tokens.extend(attr.generate_aggregate_descriptor(&user_fn, false)?); } Ok(tokens) } @@ -388,11 +460,11 @@ pub fn aggregate(attr: TokenStream, item: TokenStream) -> TokenStream { pub fn build_aggregate(attr: TokenStream, item: TokenStream) -> TokenStream { fn inner(attr: TokenStream, item: TokenStream) -> Result { let fn_attr: FunctionAttr = syn::parse(attr)?; - let user_fn: UserFunctionAttr = syn::parse(item.clone())?; + let user_fn: AggregateFnOrImpl = syn::parse(item.clone())?; let mut tokens: TokenStream2 = item.into(); for attr in fn_attr.expand() { - tokens.extend(attr.generate_agg_descriptor(&user_fn, true)?); + tokens.extend(attr.generate_aggregate_descriptor(&user_fn, true)?); } Ok(tokens) } @@ -404,76 +476,168 @@ pub fn build_aggregate(attr: TokenStream, item: TokenStream) -> TokenStream { #[derive(Debug, Clone, Default)] struct FunctionAttr { + /// Function name name: String, + /// Input argument types args: Vec, + /// Return type ret: String, + /// Whether it is a table function is_table_function: bool, + /// Whether it is an append-only aggregate function + append_only: bool, + /// Optional function for batch evaluation. batch_fn: Option, + /// State type for aggregate function. + /// If not specified, it will be the same as return type. state: Option, + /// Initial state value for aggregate function. + /// If not specified, it will be NULL. init_state: Option, + /// Prebuild function for arguments. + /// This could be any Rust expression. 
prebuild: Option, + /// Type inference function. type_infer: Option, + /// Generic type. + generic: Option, + /// Whether the function is volatile. + volatile: bool, + /// Whether the function is deprecated. deprecated: bool, } +/// Attributes from function signature `fn(..)` #[derive(Debug, Clone)] struct UserFunctionAttr { /// Function name name: String, - /// The last argument type is `&mut dyn Write`. + /// Whether the function is async. + async_: bool, + /// Whether contains argument `&Context`. + context: bool, + /// Whether contains argument `&mut impl Write`. write: bool, - /// The last argument type is `retract: bool`. + /// Whether the last argument type is `retract: bool`. retract: bool, /// The argument type are `Option`s. arg_option: bool, - /// The return type. - return_type: ReturnType, - /// The inner type `T` in `impl Iterator` - iterator_item_type: Option, + /// The return type kind. + return_type_kind: ReturnTypeKind, + /// The kind of inner type `T` in `impl Iterator` + iterator_item_kind: Option, + /// The core return type without `Option` or `Result`. + core_return_type: String, /// The number of generic types. generic: usize, /// The span of return type. return_type_span: proc_macro2::Span, - // /// `#[list(0)]` in arguments. - // list: Vec<(usize, usize)>, - // /// `#[struct(0)]` in arguments. 
- // struct_: Vec<(usize, usize)>, } -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -enum ReturnType { - T, - Option, - Result, - ResultOption, +#[derive(Debug, Clone)] +struct AggregateImpl { + struct_name: String, + accumulate: UserFunctionAttr, + retract: Option, + #[allow(dead_code)] // TODO(wrj): add merge to trait + merge: Option, + finalize: Option, + create_state: Option, + #[allow(dead_code)] // TODO(wrj): support encode + encode_state: Option, + #[allow(dead_code)] // TODO(wrj): support decode + decode_state: Option, } -impl ReturnType { - fn contains_result(&self) -> bool { - matches!(self, ReturnType::Result | ReturnType::ResultOption) +#[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] +enum AggregateFnOrImpl { + /// A simple accumulate/retract function. + Fn(UserFunctionAttr), + /// A full impl block. + Impl(AggregateImpl), +} + +impl AggregateFnOrImpl { + fn as_fn(&self) -> &UserFunctionAttr { + match self { + AggregateFnOrImpl::Fn(attr) => attr, + _ => panic!("expect fn"), + } + } + + fn accumulate(&self) -> &UserFunctionAttr { + match self { + AggregateFnOrImpl::Fn(attr) => attr, + AggregateFnOrImpl::Impl(impl_) => &impl_.accumulate, + } } - fn contains_option(&self) -> bool { - matches!(self, ReturnType::Option | ReturnType::ResultOption) + fn has_retract(&self) -> bool { + match self { + AggregateFnOrImpl::Fn(fn_) => fn_.retract, + AggregateFnOrImpl::Impl(impl_) => impl_.retract.is_some(), + } } } +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum ReturnTypeKind { + T, + Option, + Result, + ResultOption, +} + impl FunctionAttr { /// Return a unique name that can be used as an identifier. 
fn ident_name(&self) -> String { format!("{}_{}_{}", self.name, self.args.join("_"), self.ret) - .replace("[]", "list") + .replace("[]", "array") + .replace("...", "variadic") .replace(['<', '>', ' ', ','], "_") .replace("__", "_") } } impl UserFunctionAttr { - fn is_writer_style(&self) -> bool { - self.write && !self.arg_option + /// Returns true if the function is like `fn(T1, T2, .., Tn) -> T`. + fn is_pure(&self) -> bool { + !self.async_ + && !self.write + && !self.context + && !self.arg_option + && self.return_type_kind == ReturnTypeKind::T } +} - fn is_pure(&self) -> bool { - !self.write && !self.arg_option && self.return_type == ReturnType::T +/// Define the context variables which can be used by risingwave expressions. +#[proc_macro] +pub fn define_context(def: TokenStream) -> TokenStream { + fn inner(def: TokenStream) -> Result { + let attr: DefineContextAttr = syn::parse(def)?; + attr.gen() + } + + match inner(def) { + Ok(tokens) => tokens.into(), + Err(e) => e.to_compile_error().into(), + } +} + +/// Capture the context from the local context to the function impl. +/// TODO: The macro will be merged to [`#[function(.., capture_context(..))]`](macro@function) later. +/// +/// Currently, we should use the macro separately with a simple wrapper. +#[proc_macro_attribute] +pub fn capture_context(attr: TokenStream, item: TokenStream) -> TokenStream { + fn inner(attr: TokenStream, item: TokenStream) -> Result { + let attr: CaptureContextAttr = syn::parse(attr)?; + let user_fn: ItemFn = syn::parse(item)?; + generate_captured_function(attr, user_fn) + } + match inner(attr, item) { + Ok(tokens) => tokens.into(), + Err(e) => e.to_compile_error().into(), } } diff --git a/src/expr/macro/src/parse.rs b/src/expr/macro/src/parse.rs index 066705f410601..24cc6942afcee 100644 --- a/src/expr/macro/src/parse.rs +++ b/src/expr/macro/src/parse.rs @@ -14,6 +14,7 @@ //! Parse the tokens of the macro. 
+use quote::ToTokens; use syn::parse::{Parse, ParseStream}; use syn::spanned::Spanned; use syn::{LitStr, Token}; @@ -27,9 +28,10 @@ impl Parse for FunctionAttr { let sig = input.parse::()?; let sig_str = sig.value(); - let (name_args, ret) = sig_str - .split_once("->") - .ok_or_else(|| Error::new_spanned(&sig, "expected '->'"))?; + let (name_args, ret) = match sig_str.split_once("->") { + Some((name_args, ret)) => (name_args, ret), + None => (sig_str.as_str(), "void"), + }; let (name, args) = name_args .split_once('(') .ok_or_else(|| Error::new_spanned(&sig, "expected '('"))?; @@ -73,8 +75,14 @@ impl Parse for FunctionAttr { parsed.prebuild = Some(get_value()?); } else if meta.path().is_ident("type_infer") { parsed.type_infer = Some(get_value()?); + } else if meta.path().is_ident("generic") { + parsed.generic = Some(get_value()?); + } else if meta.path().is_ident("volatile") { + parsed.volatile = true; } else if meta.path().is_ident("deprecated") { parsed.deprecated = true; + } else if meta.path().is_ident("append_only") { + parsed.append_only = true; } else { return Err(Error::new( meta.span(), @@ -89,43 +97,117 @@ impl Parse for FunctionAttr { impl Parse for UserFunctionAttr { fn parse(input: ParseStream<'_>) -> Result { let itemfn: syn::ItemFn = input.parse()?; - let sig = &itemfn.sig; - let (return_type, iterator_item_type) = match &sig.output { - syn::ReturnType::Default => (ReturnType::T, None), + Ok(UserFunctionAttr::from(&itemfn.sig)) + } +} + +impl From<&syn::Signature> for UserFunctionAttr { + fn from(sig: &syn::Signature) -> Self { + let (return_type_kind, iterator_item_kind, core_return_type) = match &sig.output { + syn::ReturnType::Default => (ReturnTypeKind::T, None, "()".into()), syn::ReturnType::Type(_, ty) => { - let (return_type, inner) = check_type(ty); - let iterator_item_type = strip_iterator(inner).map(|ty| check_type(ty).0); - (return_type, iterator_item_type) + let (kind, inner) = check_type(ty); + match strip_iterator(inner) { + Some(ty) => 
{ + let (inner_kind, inner) = check_type(ty); + (kind, Some(inner_kind), inner.to_token_stream().to_string()) + } + None => (kind, None, inner.to_token_stream().to_string()), + } } }; - Ok(UserFunctionAttr { + UserFunctionAttr { name: sig.ident.to_string(), - write: last_arg_is_write(sig), + async_: sig.asyncness.is_some(), + write: sig.inputs.iter().any(arg_is_write), + context: sig.inputs.iter().any(arg_is_context), retract: last_arg_is_retract(sig), arg_option: args_contain_option(sig), - return_type, - iterator_item_type, + return_type_kind, + iterator_item_kind, + core_return_type, generic: sig.generics.params.len(), return_type_span: sig.output.span(), + } + } +} + +impl Parse for AggregateImpl { + fn parse(input: ParseStream<'_>) -> Result { + let itemimpl: syn::ItemImpl = input.parse()?; + let parse_function = |name: &str| { + itemimpl.items.iter().find_map(|item| match item { + syn::ImplItem::Fn(syn::ImplItemFn { sig, .. }) if sig.ident == name => { + Some(UserFunctionAttr::from(sig)) + } + _ => None, + }) + }; + let self_path = itemimpl.self_ty.to_token_stream().to_string(); + let struct_name = match self_path.split_once('<') { + Some((path, _)) => path.trim().into(), // remove generic parameters + None => self_path, + }; + Ok(AggregateImpl { + struct_name, + accumulate: parse_function("accumulate").expect("expect accumulate function"), + retract: parse_function("retract"), + merge: parse_function("merge"), + finalize: parse_function("finalize"), + create_state: parse_function("create_state"), + encode_state: parse_function("encode_state"), + decode_state: parse_function("decode_state"), }) } } -/// Check if the last argument is `&mut dyn Write`. 
-fn last_arg_is_write(sig: &syn::Signature) -> bool { - let Some(syn::FnArg::Typed(arg)) = sig.inputs.last() else { +impl Parse for AggregateFnOrImpl { + fn parse(input: ParseStream<'_>) -> Result { + // consume attributes + let _ = input.call(syn::Attribute::parse_outer)?; + if input.peek(Token![impl]) { + Ok(AggregateFnOrImpl::Impl(input.parse()?)) + } else { + Ok(AggregateFnOrImpl::Fn(input.parse()?)) + } + } +} + +/// Check if the argument is `&mut impl Write`. +fn arg_is_write(arg: &syn::FnArg) -> bool { + let syn::FnArg::Typed(arg) = arg else { return false; }; let syn::Type::Reference(syn::TypeReference { elem, .. }) = arg.ty.as_ref() else { return false; }; - let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = elem.as_ref() else { + let syn::Type::ImplTrait(syn::TypeImplTrait { bounds, .. }) = elem.as_ref() else { return false; }; let Some(syn::TypeParamBound::Trait(syn::TraitBound { path, .. })) = bounds.first() else { return false; }; - path.segments.last().map_or(false, |s| s.ident == "Write") + let Some(seg) = path.segments.last() else { + return false; + }; + seg.ident == "Write" +} + +/// Check if the argument is `&Context`. +fn arg_is_context(arg: &syn::FnArg) -> bool { + let syn::FnArg::Typed(arg) = arg else { + return false; + }; + let syn::Type::Reference(syn::TypeReference { elem, .. }) = arg.ty.as_ref() else { + return false; + }; + let syn::Type::Path(path) = elem.as_ref() else { + return false; + }; + let Some(seg) = path.path.segments.last() else { + return false; + }; + seg.ident == "Context" } /// Check if the last argument is `retract: bool`. @@ -162,19 +244,19 @@ fn args_contain_option(sig: &syn::Signature) -> bool { } /// Check the return type. 
-fn check_type(ty: &syn::Type) -> (ReturnType, &syn::Type) { +fn check_type(ty: &syn::Type) -> (ReturnTypeKind, &syn::Type) { if let Some(inner) = strip_outer_type(ty, "Result") { if let Some(inner) = strip_outer_type(inner, "Option") { - (ReturnType::ResultOption, inner) + (ReturnTypeKind::ResultOption, inner) } else { - (ReturnType::Result, inner) + (ReturnTypeKind::Result, inner) } } else if let Some(inner) = strip_outer_type(ty, "Option") { - (ReturnType::Option, inner) + (ReturnTypeKind::Option, inner) } else if let Some(inner) = strip_outer_type(ty, "DatumRef") { - (ReturnType::Option, inner) + (ReturnTypeKind::Option, inner) } else { - (ReturnType::T, ty) + (ReturnTypeKind::T, ty) } } diff --git a/src/expr/macro/src/types.rs b/src/expr/macro/src/types.rs index b29ceeced2f25..9dcd37b401f35 100644 --- a/src/expr/macro/src/types.rs +++ b/src/expr/macro/src/types.rs @@ -14,77 +14,65 @@ //! This module provides utility functions for SQL data type conversion and manipulation. -// name data type variant array type owned type ref type primitive +// name data type array type owned type ref type primitive const TYPE_MATRIX: &str = " - boolean Boolean Bool BoolArray bool bool _ - int16 Int16 Int16 I16Array i16 i16 y - int32 Int32 Int32 I32Array i32 i32 y - int64 Int64 Int64 I64Array i64 i64 y - int256 Int256 Int256 Int256Array Int256 Int256Ref<'_> _ - float32 Float32 Float32 F32Array F32 F32 y - float64 Float64 Float64 F64Array F64 F64 y - decimal Decimal Decimal DecimalArray Decimal Decimal y - serial Serial Serial SerialArray Serial Serial y - date Date Date DateArray Date Date y - time Time Time TimeArray Time Time y - timestamp Timestamp Timestamp TimestampArray Timestamp Timestamp y - timestamptz Timestamptz Timestamptz TimestamptzArray Timestamptz Timestamptz y - interval Interval Interval IntervalArray Interval Interval y - varchar Varchar Utf8 Utf8Array Box &str _ - bytea Bytea Bytea BytesArray Box<[u8]> &[u8] _ - jsonb Jsonb Jsonb JsonbArray JsonbVal 
JsonbRef<'_> _ - list List List ListArray ListValue ListRef<'_> _ - struct Struct Struct StructArray StructValue StructRef<'_> _ + boolean Boolean BoolArray bool bool _ + int2 Int16 I16Array i16 i16 y + int4 Int32 I32Array i32 i32 y + int8 Int64 I64Array i64 i64 y + int256 Int256 Int256Array Int256 Int256Ref<'_> _ + float4 Float32 F32Array F32 F32 y + float8 Float64 F64Array F64 F64 y + decimal Decimal DecimalArray Decimal Decimal y + serial Serial SerialArray Serial Serial y + date Date DateArray Date Date y + time Time TimeArray Time Time y + timestamp Timestamp TimestampArray Timestamp Timestamp y + timestamptz Timestamptz TimestamptzArray Timestamptz Timestamptz y + interval Interval IntervalArray Interval Interval y + varchar Varchar Utf8Array Box &str _ + bytea Bytea BytesArray Box<[u8]> &[u8] _ + jsonb Jsonb JsonbArray JsonbVal JsonbRef<'_> _ + anyarray List ListArray ListValue ListRef<'_> _ + struct Struct StructArray StructValue StructRef<'_> _ + any ??? ArrayImpl ScalarImpl ScalarRefImpl<'_> _ "; /// Maps a data type to its corresponding data type name. pub fn data_type(ty: &str) -> &str { - // XXX: - // For functions that contain `any` type, there are special handlings in the frontend, - // and the signature won't be accessed. So we simply return a placeholder here. - if ty == "any" { - return "Int32"; - } else if ty.ends_with("[]") { - return "List"; - } else if ty.starts_with("struct") { - return "Struct"; - } lookup_matrix(ty, 1) } -/// Maps a data type to its corresponding variant name. -pub fn variant(ty: &str) -> &str { - lookup_matrix(ty, 2) -} - /// Maps a data type to its corresponding array type name. pub fn array_type(ty: &str) -> &str { - if ty == "any" { - return "ArrayImpl"; - } else if ty.ends_with("[]") { - return "ListArray"; - } else if ty.starts_with("struct") { - return "StructArray"; - } - lookup_matrix(ty, 3) + lookup_matrix(ty, 2) } /// Maps a data type to its corresponding `Scalar` type name. 
pub fn owned_type(ty: &str) -> &str { - lookup_matrix(ty, 4) + lookup_matrix(ty, 3) } /// Maps a data type to its corresponding `ScalarRef` type name. pub fn ref_type(ty: &str) -> &str { - lookup_matrix(ty, 5) + lookup_matrix(ty, 4) } /// Checks if a data type is primitive. pub fn is_primitive(ty: &str) -> bool { - lookup_matrix(ty, 6) == "y" + lookup_matrix(ty, 5) == "y" } -fn lookup_matrix(ty: &str, idx: usize) -> &str { +fn lookup_matrix(mut ty: &str, idx: usize) -> &str { + if ty.ends_with("[]") { + ty = "anyarray"; + } else if ty.starts_with("struct") { + ty = "struct"; + } else if ty == "void" { + // XXX: we don't support void type yet. + // replace it with int for now. + ty = "int4"; + } let s = TYPE_MATRIX.trim().lines().find_map(|line| { let mut parts = line.split_whitespace(); if parts.next() == Some(ty) { @@ -103,10 +91,10 @@ pub fn expand_type_wildcard(ty: &str) -> Vec<&str> { .trim() .lines() .map(|l| l.split_whitespace().next().unwrap()) + .filter(|l| *l != "any") .collect(), - "*int" => vec!["int16", "int32", "int64"], - "*numeric" => vec!["decimal"], - "*float" => vec!["float32", "float64"], + "*int" => vec!["int2", "int4", "int8"], + "*float" => vec!["float4", "float8"], _ => vec![ty], } } @@ -122,32 +110,32 @@ pub fn min_compatible_type(types: &[impl AsRef]) -> &str { match (types[0].as_ref(), types[1].as_ref()) { (a, b) if a == b => a, - ("int16", "int16") => "int16", - ("int16", "int32") => "int32", - ("int16", "int64") => "int64", + ("int2", "int2") => "int2", + ("int2", "int4") => "int4", + ("int2", "int8") => "int8", - ("int32", "int16") => "int32", - ("int32", "int32") => "int32", - ("int32", "int64") => "int64", + ("int4", "int2") => "int4", + ("int4", "int4") => "int4", + ("int4", "int8") => "int8", - ("int64", "int16") => "int64", - ("int64", "int32") => "int64", - ("int64", "int64") => "int64", + ("int8", "int2") => "int8", + ("int8", "int4") => "int8", + ("int8", "int8") => "int8", - ("int16", "int256") => "int256", - ("int32", 
"int256") => "int256", - ("int64", "int256") => "int256", - ("int256", "int16") => "int256", - ("int256", "int32") => "int256", - ("int256", "int64") => "int256", - ("int256", "float64") => "float64", - ("float64", "int256") => "float64", + ("int2", "int256") => "int256", + ("int4", "int256") => "int256", + ("int8", "int256") => "int256", + ("int256", "int2") => "int256", + ("int256", "int4") => "int256", + ("int256", "int8") => "int256", + ("int256", "float8") => "float8", + ("float8", "int256") => "float8", - ("float32", "float32") => "float32", - ("float32", "float64") => "float64", + ("float4", "float4") => "float4", + ("float4", "float8") => "float8", - ("float64", "float32") => "float64", - ("float64", "float64") => "float64", + ("float8", "float4") => "float8", + ("float8", "float8") => "float8", ("decimal", "decimal") => "decimal", diff --git a/src/expr/macro/src/utils.rs b/src/expr/macro/src/utils.rs index 788d09857cc93..74fddf4680db9 100644 --- a/src/expr/macro/src/utils.rs +++ b/src/expr/macro/src/utils.rs @@ -12,6 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +use proc_macro2::Ident; +use syn::spanned::Spanned; +use syn::{Token, VisRestricted, Visibility}; + /// Convert a string from `snake_case` to `CamelCase`. 
pub fn to_camel_case(input: &str) -> String { input @@ -27,3 +31,69 @@ pub fn to_camel_case(input: &str) -> String { }) .collect() } + +pub(crate) fn extend_vis_with_super(vis: Visibility) -> Visibility { + let Visibility::Restricted(vis) = vis else { + return vis; + }; + let VisRestricted { + pub_token, + paren_token, + mut in_token, + mut path, + } = vis; + let first_segment = path.segments.first_mut().unwrap(); + if first_segment.ident == "self" { + *first_segment = Ident::new("super", first_segment.span()).into(); + } else if first_segment.ident == "super" { + let span = first_segment.span(); + path.segments.insert(0, Ident::new("super", span).into()); + in_token.get_or_insert(Token![in](in_token.span())); + } + Visibility::Restricted(VisRestricted { + pub_token, + paren_token, + in_token, + path, + }) +} + +#[cfg(test)] +mod tests { + use quote::ToTokens; + use syn::Visibility; + + use crate::utils::extend_vis_with_super; + + #[test] + fn test_extend_vis_with_super() { + let cases = [ + ("pub", "pub"), + ("pub(crate)", "pub(crate)"), + ("pub(self)", "pub(super)"), + ("pub(super)", "pub(in super::super)"), + ("pub(in self)", "pub(in super)"), + ( + "pub(in self::context::data)", + "pub(in super::context::data)", + ), + ( + "pub(in super::context::data)", + "pub(in super::super::context::data)", + ), + ("pub(in crate::func::impl_)", "pub(in crate::func::impl_)"), + ( + "pub(in ::risingwave_expr::func::impl_)", + "pub(in ::risingwave_expr::func::impl_)", + ), + ]; + for (input, expected) in cases { + let input: Visibility = syn::parse_str(input).unwrap(); + let expected: Visibility = syn::parse_str(expected).unwrap(); + let output = extend_vis_with_super(input); + let expected = expected.into_token_stream().to_string(); + let output = output.into_token_stream().to_string(); + assert_eq!(expected, output); + } + } +} diff --git a/src/expr/src/expr/expr_array_concat.rs b/src/expr/src/expr/expr_array_concat.rs deleted file mode 100644 index 
bc10eec07b72a..0000000000000 --- a/src/expr/src/expr/expr_array_concat.rs +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::sync::Arc; - -use risingwave_common::array::{ArrayRef, DataChunk, ListValue}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{ - DataType, Datum, DatumRef, ScalarRefImpl, ToDatumRef, ToOwnedDatum, -}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression, Expression}; -use crate::{bail, ensure, ExprError, Result}; - -#[derive(Debug, Copy, Clone)] -enum Operation { - ConcatArray, - AppendArray, - PrependArray, - AppendValue, - PrependValue, -} - -pub struct ArrayConcatExpression { - return_type: DataType, - left: BoxedExpression, - right: BoxedExpression, - op: Operation, - op_func: fn(DatumRef<'_>, DatumRef<'_>) -> Datum, -} - -impl std::fmt::Debug for ArrayConcatExpression { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ArrayConcatExpression") - .field("return_type", &self.return_type) - .field("left", &self.left) - .field("right", &self.right) - .field("op", &self.op) - .finish() - } -} - -impl ArrayConcatExpression { - fn new( - return_type: DataType, - left: BoxedExpression, - right: BoxedExpression, - op: Operation, - ) -> 
Self { - Self { - return_type, - left, - right, - op, - op_func: match op { - Operation::ConcatArray => Self::concat_array, - Operation::AppendArray => Self::append_array, - Operation::PrependArray => Self::prepend_array, - Operation::AppendValue => Self::append_value, - Operation::PrependValue => Self::prepend_value, - }, - } - } - - /// Concatenates two arrays with same data type. - /// The behavior is the same as PG. - /// - /// Examples: - /// - /// ```slt - /// query T - /// select array_cat(array[66], array[123]); - /// ---- - /// {66,123} - /// - /// query T - /// select array_cat(array[66], null::int[]); - /// ---- - /// {66} - /// - /// query T - /// select array_cat(null::int[], array[123]); - /// ---- - /// {123} - /// ``` - fn concat_array(left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - match (left, right) { - (None, right) => right.to_owned_datum(), - (left, None) => left.to_owned_datum(), - (Some(ScalarRefImpl::List(left)), Some(ScalarRefImpl::List(right))) => Some( - ListValue::new( - left.iter() - .chain(right.iter()) - .map(|x| x.to_owned_datum()) - .collect(), - ) - .into(), - ), - _ => { - panic!("the operands must be two arrays with the same data type"); - } - } - } - - /// Appends an array as the back element of an array of array. - /// Note the behavior is slightly different from PG. 
- /// - /// Examples: - /// - /// ```slt - /// query T - /// select array_cat(array[array[66]], array[233]); - /// ---- - /// {{66},{233}} - /// - /// # ignore NULL, same as PG - /// query T - /// select array_cat(array[array[66]], null::int[]); - /// ---- - /// {{66}} - /// - /// # different from PG - /// query T - /// select array_cat(null::int[][], array[233]); - /// ---- - /// {{233}} - /// - /// # same as PG - /// query T - /// select array_cat(null::int[][], null::int[]); - /// ---- - /// NULL - /// ``` - fn append_array(left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - match (left, right) { - (None, None) => None, - (None, right) => Some(ListValue::new(vec![right.to_owned_datum()]).into()), - (left, None) => left.to_owned_datum(), - (Some(ScalarRefImpl::List(left)), right) => Some( - ListValue::new( - left.iter() - .chain(std::iter::once(right)) - .map(|x| x.to_owned_datum()) - .collect(), - ) - .into(), - ), - _ => { - panic!("the rhs must be compatible to append to lhs"); - } - } - } - - /// Appends a value as the back element of an array. - /// The behavior is the same as PG. 
- /// - /// Examples: - /// - /// ```slt - /// query T - /// select array_append(array[66], 123); - /// ---- - /// {66,123} - /// - /// query T - /// select array_append(array[66], null::int); - /// ---- - /// {66,NULL} - /// - /// query T - /// select array_append(null::int[], 233); - /// ---- - /// {233} - /// - /// query T - /// select array_append(null::int[], null::int); - /// ---- - /// {NULL} - /// ``` - fn append_value(left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - match (left, right) { - (None, right) => Some(ListValue::new(vec![right.to_owned_datum()]).into()), - (Some(ScalarRefImpl::List(left)), right) => Some( - ListValue::new( - left.iter() - .chain(std::iter::once(right)) - .map(|x| x.to_owned_datum()) - .collect(), - ) - .into(), - ), - _ => { - panic!("the rhs must be compatible to append to lhs"); - } - } - } - - /// Prepends an array as the front element of an array of array. - /// Note the behavior is slightly different from PG. - /// - /// Examples: - /// - /// ```slt - /// query T - /// select array_cat(array[233], array[array[66]]); - /// ---- - /// {{233},{66}} - /// - /// # ignore NULL, same as PG - /// query T - /// select array_cat(null::int[], array[array[66]]); - /// ---- - /// {{66}} - /// - /// # different from PG - /// query T - /// select array_cat(array[233], null::int[][]); - /// ---- - /// {{233}} - /// - /// # same as PG - /// query T - /// select array_cat(null::int[], null::int[][]); - /// ---- - /// NULL - /// ``` - fn prepend_array(left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - match (left, right) { - (None, None) => None, - (left, None) => Some(ListValue::new(vec![left.to_owned_datum()]).into()), - (None, right) => right.to_owned_datum(), - (left, Some(ScalarRefImpl::List(right))) => Some( - ListValue::new( - std::iter::once(left) - .chain(right.iter()) - .map(|x| x.to_owned_datum()) - .collect(), - ) - .into(), - ), - _ => { - panic!("the lhs must be compatible to prepend to rhs"); - } - } - } - - /// Prepends 
a value as the front element of an array. - /// The behavior is the same as PG. - /// - /// Examples: - /// - /// ```slt - /// query T - /// select array_prepend(123, array[66]); - /// ---- - /// {123,66} - /// - /// query T - /// select array_prepend(null::int, array[66]); - /// ---- - /// {NULL,66} - /// - /// query T - /// select array_prepend(233, null::int[]); - /// ---- - /// {233} - /// - /// query T - /// select array_prepend(null::int, null::int[]); - /// ---- - /// {NULL} - /// ``` - fn prepend_value(left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - match (left, right) { - (left, None) => Some(ListValue::new(vec![left.to_owned_datum()]).into()), - (left, Some(ScalarRefImpl::List(right))) => Some( - ListValue::new( - std::iter::once(left) - .chain(right.iter()) - .map(|x| x.to_owned_datum()) - .collect(), - ) - .into(), - ), - _ => { - panic!("the lhs must be compatible to prepend to rhs"); - } - } - } - - fn evaluate(&self, left: DatumRef<'_>, right: DatumRef<'_>) -> Datum { - (self.op_func)(left, right) - } -} - -#[async_trait::async_trait] -impl Expression for ArrayConcatExpression { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - async fn eval(&self, input: &DataChunk) -> Result { - let left_array = self.left.eval_checked(input).await?; - let right_array = self.right.eval_checked(input).await?; - let mut builder = self - .return_type - .create_array_builder(left_array.len() + right_array.len()); - for (vis, (left, right)) in input - .vis() - .iter() - .zip_eq_fast(left_array.iter().zip_eq_fast(right_array.iter())) - { - if !vis { - builder.append_null(); - } else { - builder.append(&self.evaluate(left, right)); - } - } - Ok(Arc::new(builder.finish())) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let left_data = self.left.eval_row(input).await?; - let right_data = self.right.eval_row(input).await?; - Ok(self.evaluate(left_data.to_datum_ref(), right_data.to_datum_ref())) - } -} - -impl<'a> TryFrom<&'a 
ExprNode> for ArrayConcatExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - let RexNode::FuncCall(func_call_node) = prost.get_rex_node()? else { - bail!("expects a RexNode::FuncCall"); - }; - let children = func_call_node.get_children(); - ensure!(children.len() == 2); - let left = expr_build_from_prost(&children[0])?; - let right = expr_build_from_prost(&children[1])?; - let left_type = left.return_type(); - let right_type = right.return_type(); - let ret_type = DataType::from(prost.get_return_type()?); - let op = match prost.get_function_type()? { - // the types are checked in frontend, so no need for type checking here - Type::ArrayCat => { - if left_type == right_type { - Operation::ConcatArray - } else if left_type == ret_type { - Operation::AppendArray - } else if right_type == ret_type { - Operation::PrependArray - } else { - bail!("function call node invalid"); - } - } - Type::ArrayAppend => Operation::AppendValue, - Type::ArrayPrepend => Operation::PrependValue, - _ => bail!("expects `ArrayCat`|`ArrayAppend`|`ArrayPrepend`"), - }; - Ok(Self::new(ret_type, left, right, op)) - } -} - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use risingwave_common::array::DataChunk; - use risingwave_common::buffer::Bitmap; - use risingwave_common::types::ScalarImpl; - use risingwave_pb::data::PbDatum; - use risingwave_pb::expr::expr_node::{PbType, RexNode}; - use risingwave_pb::expr::{ExprNode, FunctionCall}; - - use super::*; - use crate::expr::{Expression, LiteralExpression}; - - fn make_i64_expr_node(value: i64) -> ExprNode { - ExprNode { - function_type: PbType::Unspecified as _, - return_type: Some(DataType::Int64.to_protobuf()), - rex_node: Some(RexNode::Constant(PbDatum { - body: value.to_be_bytes().to_vec(), - })), - } - } - - fn make_i64_array_expr_node(values: Vec) -> ExprNode { - ExprNode { - function_type: PbType::Array as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - 
rex_node: Some(RexNode::FuncCall(FunctionCall { - children: values.into_iter().map(make_i64_expr_node).collect(), - })), - } - } - - fn make_i64_array_array_expr_node(values: Vec>) -> ExprNode { - ExprNode { - function_type: PbType::Array as i32, - return_type: Some( - DataType::List(Box::new(DataType::List(Box::new(DataType::Int64)))).to_protobuf(), - ), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: values.into_iter().map(make_i64_array_expr_node).collect(), - })), - } - } - - #[test] - fn test_array_concat_try_from() { - { - let left = make_i64_array_expr_node(vec![42]); - let right = make_i64_array_expr_node(vec![43]); - let expr = ExprNode { - function_type: PbType::ArrayCat as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - - { - let left = make_i64_array_array_expr_node(vec![vec![42]]); - let right = make_i64_array_array_expr_node(vec![vec![43]]); - let expr = ExprNode { - function_type: PbType::ArrayCat as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - - { - let left = make_i64_array_expr_node(vec![42]); - let right = make_i64_expr_node(43); - let expr = ExprNode { - function_type: PbType::ArrayAppend as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - - { - let left = make_i64_array_array_expr_node(vec![vec![42]]); - let right = make_i64_array_expr_node(vec![43]); - let expr = ExprNode { - function_type: PbType::ArrayAppend as i32, - return_type: 
Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - - { - let left = make_i64_expr_node(43); - let right = make_i64_array_expr_node(vec![42]); - let expr = ExprNode { - function_type: PbType::ArrayPrepend as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - - { - let left = make_i64_array_expr_node(vec![43]); - let right = make_i64_array_array_expr_node(vec![vec![42]]); - let expr = ExprNode { - function_type: PbType::ArrayPrepend as i32, - return_type: Some(DataType::List(Box::new(DataType::Int64)).to_protobuf()), - rex_node: Some(RexNode::FuncCall(FunctionCall { - children: vec![left, right], - })), - }; - assert!(ArrayConcatExpression::try_from(&expr).is_ok()); - } - } - - fn make_i64_array_expr(values: Vec) -> BoxedExpression { - LiteralExpression::new( - DataType::List(Box::new(DataType::Int64)), - Some(ListValue::new(values.into_iter().map(|x| Some(x.into())).collect()).into()), - ) - .boxed() - } - - #[tokio::test] - async fn test_array_concat_array_of_primitives() { - let left = make_i64_array_expr(vec![42]); - let right = make_i64_array_expr(vec![43, 44]); - let expr = ArrayConcatExpression::new( - DataType::List(Box::new(DataType::Int64)), - left, - right, - Operation::ConcatArray, - ); - - let chunk = DataChunk::new_dummy(4) - .with_visibility([true, false, true, true].into_iter().collect::()); - let expected_array = Some(ScalarImpl::List(ListValue::new(vec![ - Some(42i64.into()), - Some(43i64.into()), - Some(44i64.into()), - ]))); - let expected = vec![ - expected_array.clone(), - None, - expected_array.clone(), - expected_array, - ]; - let actual = expr - .eval(&chunk) - .await - .unwrap() - .iter() 
- .map(|v| v.map(|s| s.into_scalar_impl())) - .collect_vec(); - assert_eq!(actual, expected); - } - - // More test cases are in e2e tests. -} diff --git a/src/expr/src/expr/expr_binary_nonnull.rs b/src/expr/src/expr/expr_binary_nonnull.rs deleted file mode 100644 index 767d1b1261b01..0000000000000 --- a/src/expr/src/expr/expr_binary_nonnull.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(test)] -mod tests { - use risingwave_common::array::interval_array::IntervalArray; - use risingwave_common::array::*; - use risingwave_common::types::test_utils::IntervalTestExt; - use risingwave_common::types::{Date, Decimal, Interval, Scalar}; - use risingwave_pb::expr::expr_node::Type; - - use super::super::*; - use crate::vector_op::arithmetic_op::{date_interval_add, date_interval_sub}; - - #[tokio::test] - async fn test_binary() { - test_binary_i32::(|x, y| x + y, Type::Add).await; - test_binary_i32::(|x, y| x - y, Type::Subtract).await; - test_binary_i32::(|x, y| x * y, Type::Multiply).await; - test_binary_i32::(|x, y| x / y, Type::Divide).await; - test_binary_i32::(|x, y| x == y, Type::Equal).await; - test_binary_i32::(|x, y| x != y, Type::NotEqual).await; - test_binary_i32::(|x, y| x > y, Type::GreaterThan).await; - test_binary_i32::(|x, y| x >= y, Type::GreaterThanOrEqual).await; - test_binary_i32::(|x, y| x < y, Type::LessThan).await; - test_binary_i32::(|x, y| x <= y, 
Type::LessThanOrEqual).await; - test_binary_decimal::(|x, y| x + y, Type::Add).await; - test_binary_decimal::(|x, y| x - y, Type::Subtract).await; - test_binary_decimal::(|x, y| x * y, Type::Multiply).await; - test_binary_decimal::(|x, y| x / y, Type::Divide).await; - test_binary_decimal::(|x, y| x == y, Type::Equal).await; - test_binary_decimal::(|x, y| x != y, Type::NotEqual).await; - test_binary_decimal::(|x, y| x > y, Type::GreaterThan).await; - test_binary_decimal::(|x, y| x >= y, Type::GreaterThanOrEqual).await; - test_binary_decimal::(|x, y| x < y, Type::LessThan).await; - test_binary_decimal::(|x, y| x <= y, Type::LessThanOrEqual).await; - test_binary_interval::( - |x, y| date_interval_add(x, y).unwrap(), - Type::Add, - ) - .await; - test_binary_interval::( - |x, y| date_interval_sub(x, y).unwrap(), - Type::Subtract, - ) - .await; - } - - async fn test_binary_i32(f: F, kind: Type) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(i32, i32) -> ::OwnedItem, - { - let mut lhs = Vec::>::new(); - let mut rhs = Vec::>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..100 { - if i % 2 == 0 { - lhs.push(Some(i)); - rhs.push(None); - target.push(None); - } else if i % 3 == 0 { - lhs.push(Some(i)); - rhs.push(Some(i + 1)); - target.push(Some(f(i, i + 1))); - } else if i % 5 == 0 { - lhs.push(Some(i + 1)); - rhs.push(Some(i)); - target.push(Some(f(i + 1, i))); - } else { - lhs.push(Some(i)); - rhs.push(Some(i)); - target.push(Some(f(i, i))); - } - } - - let col1 = I32Array::from_iter(&lhs).into_ref(); - let col2 = I32Array::from_iter(&rhs).into_ref(); - let data_chunk = DataChunk::new(vec![col1, col2], 100); - let expr = build_func( - kind, - match kind { - Type::Add | Type::Subtract | Type::Multiply | Type::Divide => DataType::Int32, - _ => DataType::Boolean, - }, - vec![ - InputRefExpression::new(DataType::Int32, 0).boxed(), - InputRefExpression::new(DataType::Int32, 1).boxed(), - ], 
- ) - .unwrap(); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..lhs.len() { - let row = OwnedRow::new(vec![ - lhs[i].map(|int| int.to_scalar_value()), - rhs[i].map(|int| int.to_scalar_value()), - ]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, expected); - } - } - - async fn test_binary_interval(f: F, kind: Type) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(Date, Interval) -> ::OwnedItem, - { - let mut lhs = Vec::>::new(); - let mut rhs = Vec::>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..100 { - if i % 2 == 0 { - rhs.push(Some(Interval::from_ymd(0, i, i))); - lhs.push(None); - target.push(None); - } else { - rhs.push(Some(Interval::from_ymd(0, i, i))); - lhs.push(Some(Date::from_num_days_from_ce_uncheck(i))); - target.push(Some(f( - Date::from_num_days_from_ce_uncheck(i), - Interval::from_ymd(0, i, i), - ))); - } - } - - let col1 = DateArray::from_iter(&lhs).into_ref(); - let col2 = IntervalArray::from_iter(&rhs).into_ref(); - let data_chunk = DataChunk::new(vec![col1, col2], 100); - let expr = build_from_pretty(format!("({kind:?}:timestamp $0:date $1:interval)")); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..lhs.len() { - let row = OwnedRow::new(vec![ - lhs[i].map(|date| date.to_scalar_value()), - rhs[i].map(|date| date.to_scalar_value()), - ]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, 
expected); - } - } - - async fn test_binary_decimal(f: F, kind: Type) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(Decimal, Decimal) -> ::OwnedItem, - { - let mut lhs = Vec::>::new(); - let mut rhs = Vec::>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..100 { - if i % 2 == 0 { - lhs.push(Some(i.into())); - rhs.push(None); - target.push(None); - } else if i % 3 == 0 { - lhs.push(Some(i.into())); - rhs.push(Some((i + 1).into())); - target.push(Some(f((i).into(), (i + 1).into()))); - } else if i % 5 == 0 { - lhs.push(Some((i + 1).into())); - rhs.push(Some((i).into())); - target.push(Some(f((i + 1).into(), (i).into()))); - } else { - lhs.push(Some((i).into())); - rhs.push(Some((i).into())); - target.push(Some(f((i).into(), (i).into()))); - } - } - - let col1 = DecimalArray::from_iter(&lhs).into_ref(); - let col2 = DecimalArray::from_iter(&rhs).into_ref(); - let data_chunk = DataChunk::new(vec![col1, col2], 100); - let expr = build_func( - kind, - match kind { - Type::Add | Type::Subtract | Type::Multiply | Type::Divide => DataType::Decimal, - _ => DataType::Boolean, - }, - vec![ - InputRefExpression::new(DataType::Decimal, 0).boxed(), - InputRefExpression::new(DataType::Decimal, 1).boxed(), - ], - ) - .unwrap(); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..lhs.len() { - let row = OwnedRow::new(vec![ - lhs[i].map(|dec| dec.to_scalar_value()), - rhs[i].map(|dec| dec.to_scalar_value()), - ]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, expected); - } - } -} diff --git a/src/expr/src/expr/expr_concat_ws.rs b/src/expr/src/expr/expr_concat_ws.rs deleted file mode 100644 index 
5bca7d0aea75c..0000000000000 --- a/src/expr/src/expr/expr_concat_ws.rs +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::convert::TryFrom; -use std::fmt::Write; -use std::sync::Arc; - -use risingwave_common::array::{ - Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, Utf8ArrayBuilder, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum}; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression, Expression}; -use crate::{bail, ensure, ExprError, Result}; - -#[derive(Debug)] -pub struct ConcatWsExpression { - return_type: DataType, - sep_expr: BoxedExpression, - string_exprs: Vec, -} - -#[async_trait::async_trait] -impl Expression for ConcatWsExpression { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - async fn eval(&self, input: &DataChunk) -> Result { - let sep_column = self.sep_expr.eval_checked(input).await?; - let sep_column = sep_column.as_utf8(); - - let mut string_columns = Vec::with_capacity(self.string_exprs.len()); - for expr in &self.string_exprs { - string_columns.push(expr.eval_checked(input).await?); - } - let string_columns_ref = string_columns - .iter() - .map(|c| c.as_utf8()) - .collect::>(); - - let row_len = input.capacity(); - let vis = input.vis(); - let mut builder = 
Utf8ArrayBuilder::new(row_len); - - for row_idx in 0..row_len { - if !vis.is_set(row_idx) { - builder.append(None); - continue; - } - let sep = match sep_column.value_at(row_idx) { - Some(sep) => sep, - None => { - builder.append(None); - continue; - } - }; - - let mut writer = builder.writer().begin(); - - let mut string_columns = string_columns_ref.iter(); - for string_column in string_columns.by_ref() { - if let Some(string) = string_column.value_at(row_idx) { - writer.write_str(string).unwrap(); - break; - } - } - - for string_column in string_columns { - if let Some(string) = string_column.value_at(row_idx) { - writer.write_str(sep).unwrap(); - writer.write_str(string).unwrap(); - } - } - - writer.finish(); - } - Ok(Arc::new(ArrayImpl::from(builder.finish()))) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let sep = self.sep_expr.eval_row(input).await?; - let sep = match sep { - Some(sep) => sep, - None => return Ok(None), - }; - - let mut strings = Vec::with_capacity(self.string_exprs.len()); - for expr in &self.string_exprs { - strings.push(expr.eval_row(input).await?); - } - let mut final_string = String::new(); - - let mut strings_iter = strings.iter(); - if let Some(string) = strings_iter.by_ref().flatten().next() { - final_string.push_str(string.as_utf8()) - } - - for string in strings_iter.flatten() { - final_string.push_str(sep.as_utf8()); - final_string.push_str(string.as_utf8()); - } - - Ok(Some(final_string.into())) - } -} - -impl ConcatWsExpression { - pub fn new( - return_type: DataType, - sep_expr: BoxedExpression, - string_exprs: Vec, - ) -> Self { - ConcatWsExpression { - return_type, - sep_expr, - string_exprs, - } - } -} - -impl<'a> TryFrom<&'a ExprNode> for ConcatWsExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - ensure!(prost.get_function_type().unwrap() == Type::ConcatWs); - - let ret_type = DataType::from(prost.get_return_type().unwrap()); - let RexNode::FuncCall(func_call_node) = 
prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - - let children = &func_call_node.children; - let sep_expr = expr_build_from_prost(&children[0])?; - - let string_exprs = children[1..] - .iter() - .map(expr_build_from_prost) - .collect::>>()?; - Ok(ConcatWsExpression::new(ret_type, sep_expr, string_exprs)) - } -} - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use risingwave_common::array::{DataChunk, DataChunkTestExt}; - use risingwave_common::row::OwnedRow; - use risingwave_common::types::Datum; - use risingwave_pb::data::data_type::TypeName; - use risingwave_pb::data::PbDataType; - use risingwave_pb::expr::expr_node::RexNode; - use risingwave_pb::expr::expr_node::Type::ConcatWs; - use risingwave_pb::expr::{ExprNode, FunctionCall}; - - use crate::expr::expr_concat_ws::ConcatWsExpression; - use crate::expr::test_utils::make_input_ref; - use crate::expr::Expression; - - pub fn make_concat_ws_function(children: Vec, ret: TypeName) -> ExprNode { - ExprNode { - function_type: ConcatWs as i32, - return_type: Some(PbDataType { - type_name: ret as i32, - ..Default::default() - }), - rex_node: Some(RexNode::FuncCall(FunctionCall { children })), - } - } - - #[tokio::test] - async fn test_eval_concat_ws_expr() { - let input_node1 = make_input_ref(0, TypeName::Varchar); - let input_node2 = make_input_ref(1, TypeName::Varchar); - let input_node3 = make_input_ref(2, TypeName::Varchar); - let input_node4 = make_input_ref(3, TypeName::Varchar); - let concat_ws_expr = ConcatWsExpression::try_from(&make_concat_ws_function( - vec![input_node1, input_node2, input_node3, input_node4], - TypeName::Varchar, - )) - .unwrap(); - - let chunk = DataChunk::from_pretty( - " - T T T T - , a b c - . a b c - , . b c - , . . . - . . . 
.", - ); - - let actual = concat_ws_expr.eval(&chunk).await.unwrap(); - let actual = actual - .iter() - .map(|r| r.map(|s| s.into_utf8())) - .collect_vec(); - - let expected = vec![Some("a,b,c"), None, Some("b,c"), Some(""), None]; - - assert_eq!(actual, expected); - } - - #[tokio::test] - async fn test_eval_row_concat_ws_expr() { - let input_node1 = make_input_ref(0, TypeName::Varchar); - let input_node2 = make_input_ref(1, TypeName::Varchar); - let input_node3 = make_input_ref(2, TypeName::Varchar); - let input_node4 = make_input_ref(3, TypeName::Varchar); - let concat_ws_expr = ConcatWsExpression::try_from(&make_concat_ws_function( - vec![input_node1, input_node2, input_node3, input_node4], - TypeName::Varchar, - )) - .unwrap(); - - let row_inputs = vec![ - vec![Some(","), Some("a"), Some("b"), Some("c")], - vec![None, Some("a"), Some("b"), Some("c")], - vec![Some(","), None, Some("b"), Some("c")], - vec![Some(","), None, None, None], - vec![None, None, None, None], - ]; - - let expected = [Some("a,b,c"), None, Some("b,c"), Some(""), None]; - - for (i, row_input) in row_inputs.iter().enumerate() { - let datum_vec: Vec = row_input.iter().map(|e| e.map(|s| s.into())).collect(); - let row = OwnedRow::new(datum_vec); - - let result = concat_ws_expr.eval_row(&row).await.unwrap(); - let expected = expected[i].map(|s| s.into()); - - assert_eq!(result, expected); - } - } -} diff --git a/src/expr/src/expr/expr_is_null.rs b/src/expr/src/expr/expr_is_null.rs deleted file mode 100644 index 75ef834a624d0..0000000000000 --- a/src/expr/src/expr/expr_is_null.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::sync::Arc; - -use risingwave_common::array::{ArrayImpl, ArrayRef, BoolArray, DataChunk}; -use risingwave_common::buffer::Bitmap; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, Scalar}; -use risingwave_expr_macro::build_function; - -use crate::expr::{BoxedExpression, Expression}; -use crate::Result; - -#[derive(Debug)] -pub struct IsNullExpression { - child: BoxedExpression, -} - -#[derive(Debug)] -pub struct IsNotNullExpression { - child: BoxedExpression, -} - -impl IsNullExpression { - fn new(child: BoxedExpression) -> Self { - Self { child } - } -} - -impl IsNotNullExpression { - fn new(child: BoxedExpression) -> Self { - Self { child } - } -} - -#[async_trait::async_trait] -impl Expression for IsNullExpression { - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, input: &DataChunk) -> Result { - let child_arr = self.child.eval_checked(input).await?; - let arr = BoolArray::new(!child_arr.null_bitmap(), Bitmap::ones(input.capacity())); - - Ok(Arc::new(ArrayImpl::Bool(arr))) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let result = self.child.eval_row(input).await?; - let is_null = result.is_none(); - Ok(Some(is_null.to_scalar_value())) - } -} - -#[async_trait::async_trait] -impl Expression for IsNotNullExpression { - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, input: &DataChunk) -> Result { - let child_arr = self.child.eval_checked(input).await?; - let null_bitmap = match Arc::try_unwrap(child_arr) { 
- Ok(child_arr) => child_arr.into_null_bitmap(), - Err(child_arr) => child_arr.null_bitmap().clone(), - }; - let arr = BoolArray::new(null_bitmap, Bitmap::ones(input.capacity())); - - Ok(Arc::new(ArrayImpl::Bool(arr))) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let result = self.child.eval_row(input).await?; - let is_not_null = result.is_some(); - Ok(Some(is_not_null.to_scalar_value())) - } -} - -#[build_function("is_null(*) -> boolean")] -fn build_is_null_expr(_: DataType, children: Vec) -> Result { - Ok(Box::new(IsNullExpression::new( - children.into_iter().next().unwrap(), - ))) -} - -#[build_function("is_not_null(*) -> boolean")] -fn build_is_not_null_expr(_: DataType, children: Vec) -> Result { - Ok(Box::new(IsNotNullExpression::new( - children.into_iter().next().unwrap(), - ))) -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use risingwave_common::array::{Array, ArrayBuilder, DataChunk, DecimalArrayBuilder}; - use risingwave_common::row::OwnedRow; - use risingwave_common::types::{DataType, Decimal}; - - use crate::expr::expr_is_null::{IsNotNullExpression, IsNullExpression}; - use crate::expr::{BoxedExpression, InputRefExpression}; - use crate::Result; - - async fn do_test( - expr: BoxedExpression, - expected_eval_result: Vec, - expected_eval_row_result: Vec, - ) -> Result<()> { - let input_array = { - let mut builder = DecimalArrayBuilder::new(3); - builder.append(Some(Decimal::from_str("0.1").unwrap())); - builder.append(Some(Decimal::from_str("-0.1").unwrap())); - builder.append(None); - builder.finish() - }; - - let input_chunk = DataChunk::new(vec![input_array.into_ref()], 3); - let result_array = expr.eval(&input_chunk).await.unwrap(); - assert_eq!(3, result_array.len()); - for (i, v) in expected_eval_result.iter().enumerate() { - assert_eq!( - *v, - bool::try_from(result_array.value_at(i).unwrap()).unwrap() - ); - } - - let rows = vec![ - OwnedRow::new(vec![Some(1.into()), Some(2.into())]), - OwnedRow::new(vec![None, 
Some(2.into())]), - ]; - - for (i, row) in rows.iter().enumerate() { - let result = expr.eval_row(row).await.unwrap().unwrap(); - assert_eq!(expected_eval_row_result[i], result.into_bool()); - } - - Ok(()) - } - - #[tokio::test] - async fn test_is_null() -> Result<()> { - let expr = IsNullExpression::new(Box::new(InputRefExpression::new(DataType::Decimal, 0))); - do_test(Box::new(expr), vec![false, false, true], vec![false, true]) - .await - .unwrap(); - Ok(()) - } - - #[tokio::test] - async fn test_is_not_null() -> Result<()> { - let expr = - IsNotNullExpression::new(Box::new(InputRefExpression::new(DataType::Decimal, 0))); - do_test(Box::new(expr), vec![true, true, false], vec![true, false]) - .await - .unwrap(); - Ok(()) - } -} diff --git a/src/expr/src/expr/expr_jsonb_access.rs b/src/expr/src/expr/expr_jsonb_access.rs deleted file mode 100644 index 5bcc76fd16705..0000000000000 --- a/src/expr/src/expr/expr_jsonb_access.rs +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use either::Either; -use risingwave_common::array::{ - Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, I32Array, JsonbArray, JsonbArrayBuilder, - Utf8Array, Utf8ArrayBuilder, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, JsonbRef, Scalar, ScalarRef}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::build_function; - -use super::{BoxedExpression, Expression}; -use crate::Result; - -/// This is forked from [`crate::expr::template::BinaryExpression`] for the following reasons: -/// * Optimize for the case when rhs path is const. (not implemented yet) -/// * It can return null when neither input is null. -/// * We could `append(RefItem)` directly rather than getting a `OwnedItem` first. -pub struct JsonbAccessExpression { - input: BoxedExpression, - path: Either, - func: F, - _phantom: std::marker::PhantomData, -} - -impl std::fmt::Debug for JsonbAccessExpression { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("JsonbAccessExpression") - .field("input", &self.input) - .field("path", &self.path) - .finish() - } -} - -impl JsonbAccessExpression -where - F: Send + Sync + for<'a> Fn(JsonbRef<'a>, A::RefItem<'_>) -> Option>, -{ - #[expect(dead_code)] - pub fn new_const(input: BoxedExpression, path: A::OwnedItem, func: F) -> Self { - Self { - input, - path: Either::Right(path), - func, - _phantom: std::marker::PhantomData, - } - } - - pub fn new_expr(input: BoxedExpression, path: BoxedExpression, func: F) -> Self { - Self { - input, - path: Either::Left(path), - func, - _phantom: std::marker::PhantomData, - } - } - - pub fn eval_strict<'a>( - &self, - v: Option>, - p: Option>, - ) -> Option> { - match (v, p) { - (Some(v), Some(p)) => (self.func)(v, p), - _ => None, - } - } -} - -#[async_trait::async_trait] -impl Expression for JsonbAccessExpression -where - A: Array, - for<'a> &'a A: From<&'a ArrayImpl>, - O: AccessOutput, - F: Send + Sync + for<'a> 
Fn(JsonbRef<'a>, A::RefItem<'_>) -> Option>, -{ - fn return_type(&self) -> DataType { - O::return_type() - } - - async fn eval(&self, input: &DataChunk) -> crate::Result { - let Either::Left(path_expr) = &self.path else { - unreachable!("optimization for const path not implemented yet"); - }; - let path_array = path_expr.eval_checked(input).await?; - let path_array: &A = path_array.as_ref().into(); - - let input_array = self.input.eval_checked(input).await?; - let input_array: &JsonbArray = input_array.as_ref().into(); - - let mut builder = O::new(input.capacity()); - match input.visibility() { - // We could ignore visibility and always evaluate access path for all values, because it - // never returns runtime error. But using visibility could save us some clone cost, - // unless we adjust [`JsonbArray`] to make sure all clones are on [`Arc`]. - Some(visibility) => { - for ((v, p), visible) in input_array - .iter() - .zip_eq_fast(path_array.iter()) - .zip_eq_fast(visibility.iter()) - { - let r = visible.then(|| self.eval_strict(v, p)).flatten(); - builder.output_nullable(r)?; - } - } - None => { - for (v, p) in input_array.iter().zip_eq_fast(path_array.iter()) { - builder.output_nullable(self.eval_strict(v, p))?; - } - } - }; - Ok(std::sync::Arc::new(builder.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> crate::Result { - let Either::Left(path_expr) = &self.path else { - unreachable!("optimization for const path not implemented yet"); - }; - let p = path_expr.eval_row(input).await?; - let p = p - .as_ref() - .map(|p| p.as_scalar_ref_impl().try_into().unwrap()); - - let v = self.input.eval_row(input).await?; - let v = v - .as_ref() - .map(|v| v.as_scalar_ref_impl().try_into().unwrap()); - - let r = self.eval_strict(v, p); - Ok(r.and_then(O::to_datum)) - } -} - -pub fn jsonb_object_field<'a>(v: JsonbRef<'a>, p: &str) -> Option> { - v.access_object_field(p) -} - -pub fn jsonb_array_element(v: JsonbRef<'_>, p: i32) -> Option> { - let idx = if p 
< 0 { - let Ok(len) = v.array_len() else { - return None; - }; - if ((-p) as usize) > len { - return None; - } else { - len - ((-p) as usize) - } - } else { - p as usize - }; - v.access_array_element(idx) -} - -trait AccessOutput: ArrayBuilder { - fn return_type() -> DataType; - fn output(&mut self, v: JsonbRef<'_>) -> crate::Result<()>; - fn to_datum(v: JsonbRef<'_>) -> Datum; - fn output_nullable(&mut self, v: Option>) -> crate::Result<()> { - match v { - Some(v) => self.output(v)?, - None => self.append_null(), - }; - Ok(()) - } -} - -impl AccessOutput for JsonbArrayBuilder { - fn return_type() -> DataType { - DataType::Jsonb - } - - fn output(&mut self, v: JsonbRef<'_>) -> crate::Result<()> { - self.append(Some(v)); - Ok(()) - } - - fn to_datum(v: JsonbRef<'_>) -> Datum { - Some(v.to_owned_scalar().to_scalar_value()) - } -} - -impl AccessOutput for Utf8ArrayBuilder { - fn return_type() -> DataType { - DataType::Varchar - } - - fn output(&mut self, v: JsonbRef<'_>) -> crate::Result<()> { - match v.is_jsonb_null() { - true => self.append_null(), - false => { - let mut writer = self.writer().begin(); - v.force_str(&mut writer) - .map_err(|e| crate::ExprError::Internal(e.into()))?; - writer.finish(); - } - }; - Ok(()) - } - - fn to_datum(v: JsonbRef<'_>) -> Datum { - match v.is_jsonb_null() { - true => None, - false => { - let mut s = String::new(); - v.force_str(&mut s).unwrap(); - let s: Box = s.into(); - Some(s.to_scalar_value()) - } - } - } -} - -#[build_function("jsonb_access_inner(jsonb, varchar) -> jsonb")] -fn build_jsonb_access_object_field( - _return_type: DataType, - children: Vec, -) -> Result { - let mut iter = children.into_iter(); - let l = iter.next().unwrap(); - let r = iter.next().unwrap(); - Ok( - JsonbAccessExpression::::new_expr( - l, - r, - jsonb_object_field, - ) - .boxed(), - ) -} - -#[build_function("jsonb_access_inner(jsonb, int32) -> jsonb")] -fn build_jsonb_access_array_element( - _return_type: DataType, - children: Vec, -) -> Result { - 
let mut iter = children.into_iter(); - let l = iter.next().unwrap(); - let r = iter.next().unwrap(); - Ok( - JsonbAccessExpression::::new_expr( - l, - r, - jsonb_array_element, - ) - .boxed(), - ) -} - -#[build_function("jsonb_access_str(jsonb, varchar) -> varchar")] -fn build_jsonb_access_object_field_str( - _return_type: DataType, - children: Vec, -) -> Result { - let mut iter = children.into_iter(); - let l = iter.next().unwrap(); - let r = iter.next().unwrap(); - Ok( - JsonbAccessExpression::::new_expr(l, r, jsonb_object_field) - .boxed(), - ) -} - -#[build_function("jsonb_access_str(jsonb, int32) -> varchar")] -fn build_jsonb_access_array_element_str( - _return_type: DataType, - children: Vec, -) -> Result { - let mut iter = children.into_iter(); - let l = iter.next().unwrap(); - let r = iter.next().unwrap(); - Ok( - JsonbAccessExpression::::new_expr(l, r, jsonb_array_element) - .boxed(), - ) -} - -#[cfg(test)] -mod tests { - use std::vec; - - use risingwave_common::array::{ArrayImpl, DataChunk, Utf8Array}; - use risingwave_common::types::Scalar; - use risingwave_common::util::value_encoding::serialize_datum; - use risingwave_pb::data::data_type::TypeName; - use risingwave_pb::data::{DataType as ProstDataType, Datum as ProstDatum}; - use risingwave_pb::expr::expr_node::{RexNode, Type}; - use risingwave_pb::expr::{ExprNode, FunctionCall}; - - use crate::expr::build_from_prost; - - #[tokio::test] - async fn test_array_access_expr() { - let values = FunctionCall { - children: vec![ - ExprNode { - function_type: Type::Unspecified as i32, - return_type: Some(ProstDataType { - type_name: TypeName::Varchar as i32, - ..Default::default() - }), - rex_node: Some(RexNode::Constant(ProstDatum { - body: serialize_datum(Some("foo".into()).as_ref()), - })), - }, - ExprNode { - function_type: Type::Unspecified as i32, - return_type: Some(ProstDataType { - type_name: TypeName::Varchar as i32, - ..Default::default() - }), - rex_node: Some(RexNode::Constant(ProstDatum { - body: 
serialize_datum(Some("bar".into()).as_ref()), - })), - }, - ], - }; - let array_index = FunctionCall { - children: vec![ - ExprNode { - function_type: Type::Array as i32, - return_type: Some(ProstDataType { - type_name: TypeName::List as i32, - field_type: vec![ProstDataType { - type_name: TypeName::Varchar as i32, - ..Default::default() - }], - ..Default::default() - }), - rex_node: Some(RexNode::FuncCall(values)), - }, - ExprNode { - function_type: Type::Unspecified as i32, - return_type: Some(ProstDataType { - type_name: TypeName::Int32 as i32, - ..Default::default() - }), - rex_node: Some(RexNode::Constant(ProstDatum { - body: serialize_datum(Some(1_i32.to_scalar_value()).as_ref()), - })), - }, - ], - }; - let access = ExprNode { - function_type: Type::ArrayAccess as i32, - return_type: Some(ProstDataType { - type_name: TypeName::Varchar as i32, - ..Default::default() - }), - rex_node: Some(RexNode::FuncCall(array_index)), - }; - let expr = build_from_prost(&access); - assert!(expr.is_ok()); - - let res = expr.unwrap().eval(&DataChunk::new_dummy(1)).await.unwrap(); - assert_eq!(*res, ArrayImpl::Utf8(Utf8Array::from_iter(["foo"]))); - } -} diff --git a/src/expr/src/expr/expr_nested_construct.rs b/src/expr/src/expr/expr_nested_construct.rs deleted file mode 100644 index ece26ed138258..0000000000000 --- a/src/expr/src/expr/expr_nested_construct.rs +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use std::convert::TryFrom; -use std::sync::Arc; - -use risingwave_common::array::{ - ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, ListArrayBuilder, ListValue, StructArray, - StructValue, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, Scalar}; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use crate::expr::{build_from_prost as expr_build_from_prost, BoxedExpression, Expression}; -use crate::{bail, ensure, ExprError, Result}; - -#[derive(Debug)] -pub struct NestedConstructExpression { - data_type: DataType, - elements: Vec, -} - -#[async_trait::async_trait] -impl Expression for NestedConstructExpression { - fn return_type(&self) -> DataType { - self.data_type.clone() - } - - async fn eval(&self, input: &DataChunk) -> Result { - let mut columns = Vec::with_capacity(self.elements.len()); - for e in &self.elements { - columns.push(e.eval_checked(input).await?); - } - - if let DataType::Struct(ty) = &self.data_type { - let array = StructArray::new(ty.clone(), columns, input.vis().to_bitmap()); - Ok(Arc::new(ArrayImpl::Struct(array))) - } else if let DataType::List { .. } = &self.data_type { - let chunk = DataChunk::new(columns, input.vis().clone()); - let mut builder = ListArrayBuilder::with_type(input.capacity(), self.data_type.clone()); - for row in chunk.rows_with_holes() { - if let Some(row) = row { - builder.append_row_ref(row); - } else { - builder.append_null(); - } - } - Ok(Arc::new(ArrayImpl::List(builder.finish()))) - } else { - Err(ExprError::UnsupportedFunction( - "expects struct or list type".to_string(), - )) - } - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let mut datums = Vec::with_capacity(self.elements.len()); - for e in &self.elements { - datums.push(e.eval_row(input).await?); - } - if let DataType::Struct { .. 
} = &self.data_type { - Ok(Some(StructValue::new(datums).to_scalar_value())) - } else if let DataType::List(_) = &self.data_type { - Ok(Some(ListValue::new(datums).to_scalar_value())) - } else { - Err(ExprError::UnsupportedFunction( - "expects struct or list type".to_string(), - )) - } - } -} - -impl NestedConstructExpression { - pub fn new(data_type: DataType, elements: Vec) -> Self { - NestedConstructExpression { - data_type, - elements, - } - } -} - -impl<'a> TryFrom<&'a ExprNode> for NestedConstructExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - ensure!([Type::Array, Type::Row].contains(&prost.get_function_type().unwrap())); - - let ret_type = DataType::from(prost.get_return_type().unwrap()); - let RexNode::FuncCall(func_call_node) = prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - let elements = func_call_node - .children - .iter() - .map(expr_build_from_prost) - .collect::>>()?; - Ok(NestedConstructExpression::new(ret_type, elements)) - } -} - -#[cfg(test)] -mod tests { - use risingwave_common::array::{DataChunk, ListValue}; - use risingwave_common::row::OwnedRow; - use risingwave_common::types::{DataType, Scalar, ScalarImpl}; - - use super::NestedConstructExpression; - use crate::expr::{BoxedExpression, Expression, LiteralExpression}; - - #[tokio::test] - async fn test_eval_array_expr() { - let expr = NestedConstructExpression { - data_type: DataType::List(DataType::Int32.into()), - elements: vec![i32_expr(1.into()), i32_expr(2.into())], - }; - - let arr = expr.eval(&DataChunk::new_dummy(2)).await.unwrap(); - assert_eq!(arr.len(), 2); - } - - #[tokio::test] - async fn test_eval_row_array_expr() { - let expr = NestedConstructExpression { - data_type: DataType::List(DataType::Int32.into()), - elements: vec![i32_expr(1.into()), i32_expr(2.into())], - }; - - let scalar_impl = expr - .eval_row(&OwnedRow::new(vec![])) - .await - .unwrap() - .unwrap(); - let expected = 
ListValue::new(vec![Some(1.into()), Some(2.into())]).to_scalar_value(); - assert_eq!(expected, scalar_impl); - } - - fn i32_expr(v: ScalarImpl) -> BoxedExpression { - Box::new(LiteralExpression::new(DataType::Int32, Some(v))) - } -} diff --git a/src/expr/src/expr/expr_proctime.rs b/src/expr/src/expr/expr_proctime.rs deleted file mode 100644 index aed36d7da52ef..0000000000000 --- a/src/expr/src/expr/expr_proctime.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use risingwave_common::array::DataChunk; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::epoch; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use super::{Expression, ValueImpl}; -use crate::{bail, ensure, ExprError, Result}; - -#[derive(Debug)] -pub struct ProcTimeExpression; - -impl ProcTimeExpression { - pub fn new() -> Self { - ProcTimeExpression - } -} - -impl<'a> TryFrom<&'a ExprNode> for ProcTimeExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - ensure!(prost.get_function_type().unwrap() == Type::Proctime); - ensure!(DataType::from(prost.get_return_type().unwrap()) == DataType::Timestamptz); - let RexNode::FuncCall(func_call_node) = prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - ensure!(func_call_node.get_children().is_empty()); - - Ok(ProcTimeExpression::new()) - } -} - -/// Get the processing time in Timestamptz scalar from the task-local epoch. 
-fn proc_time_from_epoch() -> Result { - epoch::task_local::curr_epoch() - .map(|e| e.as_scalar()) - .ok_or(ExprError::Context) -} - -#[async_trait::async_trait] -impl Expression for ProcTimeExpression { - fn return_type(&self) -> DataType { - DataType::Timestamptz - } - - async fn eval_v2(&self, input: &DataChunk) -> Result { - proc_time_from_epoch().map(|s| ValueImpl::Scalar { - value: Some(s), - capacity: input.capacity(), - }) - } - - async fn eval_row(&self, _input: &OwnedRow) -> Result { - proc_time_from_epoch().map(Some) - } -} - -#[cfg(test)] -mod tests { - use risingwave_common::array::DataChunk; - use risingwave_common::types::Timestamptz; - use risingwave_common::util::epoch::{Epoch, EpochPair}; - - use super::*; - - #[tokio::test] - async fn test_expr_proctime() { - let proctime_expr = ProcTimeExpression::new(); - let curr_epoch = Epoch::now(); - let epoch = EpochPair { - curr: curr_epoch.0, - prev: 0, - }; - let chunk = DataChunk::new_dummy(3); - - let array = epoch::task_local::scope(epoch, proctime_expr.eval(&chunk)) - .await - .unwrap(); - - for datum_ref in array.iter() { - assert_eq!( - datum_ref, - Some( - Timestamptz::from_millis(curr_epoch.as_unix_millis() as i64) - .unwrap() - .into() - ) - ); - } - } -} diff --git a/src/expr/src/expr/expr_regexp.rs b/src/expr/src/expr/expr_regexp.rs deleted file mode 100644 index 7907bb45ab915..0000000000000 --- a/src/expr/src/expr/expr_regexp.rs +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use std::str::FromStr; -use std::sync::Arc; - -use itertools::Itertools; -use regex::{Regex, RegexBuilder}; -use risingwave_common::array::{ - Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, ListArrayBuilder, ListRef, ListValue, - Utf8Array, Utf8ArrayBuilder, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_common::util::value_encoding::deserialize_datum; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use super::{build_from_prost as expr_build_from_prost, Expression}; -use crate::{bail, ensure, ExprError, Result}; - -#[derive(Debug)] -pub struct RegexpContext { - pub regex: Regex, - pub global: bool, -} - -impl RegexpContext { - pub fn new(pattern: &str, flags: &str) -> Result { - let options = RegexpOptions::from_str(flags)?; - Ok(Self { - regex: RegexBuilder::new(pattern) - .case_insensitive(options.case_insensitive) - .build()?, - global: options.global, - }) - } - - pub fn from_pattern(pattern: Datum) -> Result { - let pattern = match &pattern { - None => NULL_PATTERN, - Some(ScalarImpl::Utf8(s)) => s.as_ref(), - _ => bail!("invalid pattern: {pattern:?}"), - }; - Self::new(pattern, "") - } - - pub fn from_pattern_flags(pattern: Datum, flags: Datum) -> Result { - let pattern = match (&pattern, &flags) { - (None, _) | (_, None) => NULL_PATTERN, - (Some(ScalarImpl::Utf8(s)), _) => s.as_ref(), - _ => bail!("invalid pattern: {pattern:?}"), - }; - let flags = match &flags { - None => "", - Some(ScalarImpl::Utf8(s)) => s.as_ref(), - _ => bail!("invalid flags: {flags:?}"), - }; - Self::new(pattern, flags) - } -} - -/// -struct RegexpOptions { - /// `c` and `i` - case_insensitive: bool, - /// `g` - global: bool, -} - -#[expect(clippy::derivable_impls)] -impl Default for RegexpOptions { - fn 
default() -> Self { - Self { - case_insensitive: false, - global: false, - } - } -} - -impl FromStr for RegexpOptions { - type Err = ExprError; - - fn from_str(s: &str) -> Result { - let mut opts = Self::default(); - for c in s.chars() { - match c { - // Case sensitive matching here - 'c' => opts.case_insensitive = false, - // Case insensitive matching here - 'i' => opts.case_insensitive = true, - // Global matching here - 'g' => opts.global = true, - _ => { - bail!("invalid regular expression option: \"{c}\""); - } - } - } - Ok(opts) - } -} - -#[derive(Debug)] -pub struct RegexpMatchExpression { - pub child: Box, - pub ctx: RegexpContext, -} - -/// The pattern that matches nothing. -pub const NULL_PATTERN: &str = "a^"; - -impl<'a> TryFrom<&'a ExprNode> for RegexpMatchExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - ensure!(prost.get_function_type().unwrap() == Type::RegexpMatch); - let RexNode::FuncCall(func_call_node) = prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - let mut children = func_call_node.children.iter(); - let Some(text_node) = children.next() else { - bail!("Expected argument text"); - }; - let text_expr = expr_build_from_prost(text_node)?; - let Some(pattern_node) = children.next() else { - bail!("Expected argument pattern"); - }; - let mut pattern = match &pattern_node.get_rex_node()? 
{ - RexNode::Constant(pattern_value) => { - let pattern_datum = deserialize_datum( - pattern_value.get_body().as_slice(), - &DataType::from(pattern_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match pattern_datum { - Some(ScalarImpl::Utf8(pattern)) => pattern.to_string(), - // NULL pattern - None => NULL_PATTERN.to_string(), - _ => bail!("Expected pattern to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant pattern in regexp_match".to_string(), - )) - } - }; - - let flags = if let Some(flags_node) = children.next() { - match &flags_node.get_rex_node()? { - RexNode::Constant(flags_value) => { - let flags_datum = deserialize_datum( - flags_value.get_body().as_slice(), - &DataType::from(flags_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match flags_datum { - Some(ScalarImpl::Utf8(flags)) => flags.to_string(), - // NULL flag - None => { - pattern = NULL_PATTERN.to_string(); - "".to_string() - } - _ => bail!("Expected flags to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant flags in regexp_match".to_string(), - )) - } - } - } else { - "".to_string() - }; - - let ctx = RegexpContext::new(&pattern, &flags)?; - Ok(Self { - child: text_expr, - ctx, - }) - } -} - -impl RegexpMatchExpression { - /// Match one row and return the result. - // TODO: The optimization can be allocated. - fn match_one(&self, text: Option<&str>) -> Option { - // If there are multiple captures, then the first one is the whole match, and should be - // ignored in PostgreSQL's behavior. 
- let skip_flag = self.ctx.regex.captures_len() > 1; - - if let Some(text) = text { - if let Some(capture) = self.ctx.regex.captures(text) { - let list = capture - .iter() - .skip(if skip_flag { 1 } else { 0 }) - .map(|mat| mat.map(|m| m.as_str().into())) - .collect_vec(); - let list = ListValue::new(list); - Some(list) - } else { - None - } - } else { - None - } - } -} - -#[async_trait::async_trait] -impl Expression for RegexpMatchExpression { - fn return_type(&self) -> DataType { - DataType::List(Box::new(DataType::Varchar)) - } - - async fn eval(&self, input: &DataChunk) -> Result { - let text_arr = self.child.eval_checked(input).await?; - let text_arr: &Utf8Array = text_arr.as_ref().into(); - let mut output = ListArrayBuilder::with_type( - input.capacity(), - DataType::List(Box::new(DataType::Varchar)), - ); - - for (text, vis) in text_arr.iter().zip_eq_fast(input.vis().iter()) { - if !vis { - output.append_null(); - } else if let Some(list) = self.match_one(text) { - let list_ref = ListRef::ValueRef { val: &list }; - output.append(Some(list_ref)); - } else { - output.append_null(); - } - } - - Ok(Arc::new(output.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let text = self.child.eval_row(input).await?; - Ok(if let Some(ScalarImpl::Utf8(text)) = text { - self.match_one(Some(&text)).map(Into::into) - } else { - None - }) - } -} - -#[derive(Debug)] -pub struct RegexpReplaceExpression { - /// The source to be matched and replaced - pub source: Box, - /// The regex context, used to match the given pattern - /// Contains `flag` relevant information, now we support `icg` flag options - pub ctx: RegexpContext, - /// The replacement string - pub replacement: String, - /// The actual return type by evaluating this expression - pub return_type: DataType, - /// The start position to replace the source - /// The starting index should be `0` - pub start: Option, - /// The N, used to specified the N-th position to be replaced - /// Note 
that this field is only available if `start` > 0 - pub n: Option, -} - -/// This trait provides the transformation from `ExprNode` to `RegexpReplaceExpression` -impl<'a> TryFrom<&'a ExprNode> for RegexpReplaceExpression { - type Error = ExprError; - - /// Try to convert the given `ExprNode` to the replace expression - fn try_from(prost: &'a ExprNode) -> Result { - // The function type must be of Type::RegexpReplace - ensure!(prost.get_function_type().unwrap() == Type::RegexpReplace); - - // Get the return type first - let return_type = DataType::from(prost.get_return_type().unwrap()); - - // Get the top node, which must be the function call node in this case - let RexNode::FuncCall(func_call_node) = prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - - // The children node, must contain `source`, `pattern`, `replacement` - // `start, N`, `flags` are optional - let mut children = func_call_node.children.iter(); - - // Get the source expression, will be used as the `child` in replace expr - let Some(source_node) = children.next() else { - bail!("Expected argument text"); - }; - let source = expr_build_from_prost(source_node)?; - - // Get the regex pattern of this call - let Some(pattern_node) = children.next() else { - bail!("Expected argument pattern"); - }; - // Store the pattern as the string, to pass in the regex context - let pattern = match &pattern_node.get_rex_node()? 
{ - RexNode::Constant(pattern_value) => { - let pattern_datum = deserialize_datum( - pattern_value.get_body().as_slice(), - &DataType::from(pattern_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match pattern_datum { - Some(ScalarImpl::Utf8(pattern)) => pattern.to_string(), - // NULL pattern - None => NULL_PATTERN.to_string(), - _ => bail!("Expected pattern to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant pattern in regexp_replace".to_string(), - )) - } - }; - - // Get the replacement string of this call - let Some(replacement_node) = children.next() else { - bail!("Expected argument replacement"); - }; - // Same as the pattern above, store as the string - let replacement = match &replacement_node.get_rex_node()? { - RexNode::Constant(replacement_value) => { - let replacement_datum = deserialize_datum( - replacement_value.get_body().as_slice(), - &DataType::from(replacement_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match replacement_datum { - Some(ScalarImpl::Utf8(replacement)) => replacement.to_string(), - // NULL replacement - // FIXME: Do we need the NULL match arm here? - _ => bail!("Expected replacement to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant in regexp_replace".to_string(), - )) - } - }; - - // The parsing for [, start [, N ]] [, flags ] options - let mut flags: Option = None; - let mut start: Option = None; - let mut n: Option = None; - let mut n_flag = false; - let mut f_flag = false; - - // Try to get the next possible node, see if any of the options are specified - if let Some(placeholder_node) = children.next() { - // Get the placeholder text first - let _placeholder = match &placeholder_node.get_rex_node()? 
{ - RexNode::Constant(placeholder_value) => { - let placeholder_datum = deserialize_datum( - placeholder_value.get_body().as_slice(), - &DataType::from(placeholder_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match placeholder_datum { - Some(ScalarImpl::Int32(v)) => { - if v <= 0 { - // `start` must be greater than zero, if ever specified - // This conforms with PG - bail!("`start` must be greater than zero."); - } - start = Some(v as u32); - "".to_string() - } - Some(ScalarImpl::Utf8(v)) => { - // If the `start` is not specified - // Then this must be the `flags` - f_flag = true; - flags = Some(v.to_string()); - "".to_string() - } - // NULL replacement - // FIXME: Do we need the NULL match arm here? - _ => bail!("Expected extra option to be a String/Int32"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant in regexp_replace".to_string(), - )) - } - }; - - // Get the next node - if !f_flag { - if let Some(placeholder_node) = children.next() { - // Get the text as above - let placeholder = match &placeholder_node.get_rex_node()? { - RexNode::Constant(placeholder_value) => { - let placeholder_datum = deserialize_datum( - placeholder_value.get_body().as_slice(), - &DataType::from(placeholder_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match placeholder_datum { - Some(ScalarImpl::Int32(v)) => { - n_flag = true; - n = Some(v as u32); - "".to_string() - } - Some(ScalarImpl::Utf8(v)) => v.to_string(), - // NULL replacement - // FIXME: Do we need the NULL match arm here? - _ => bail!("Expected extra option to be a String/Int32"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant in regexp_replace".to_string(), - )) - } - }; - - if n_flag { - // Check if any flag is specified - if let Some(flag_node) = children.next() { - // Get the flag - flags = match &flag_node.get_rex_node()? 
{ - RexNode::Constant(flag_value) => { - let flag_datum = deserialize_datum( - flag_value.get_body().as_slice(), - &DataType::from(flag_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match flag_datum { - Some(ScalarImpl::Utf8(v)) => Some(v.to_string()), - // NULL replacement - // FIXME: Do we need the NULL match arm here? - _ => bail!("Expected flag to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant in regexp_replace".to_string(), - )) - } - }; - } - } else { - flags = Some(placeholder); - } - } - } - } - - // TODO: Any other error handling? - if let Some(_other) = children.next() { - // There should not any other option after the `flags` - bail!("invalid parameters specified in regexp_replace"); - } - - // Check if the syntax is correct - if flags.is_some() && start.is_some() && n.is_none() { - // `start`, `flag` with no `N` specified is an invalid combination - bail!("invalid syntax for `regexp_replace`"); - } - - // Construct the final `RegexpReplaceExpression` - let flags = if let Some(f) = flags { - f - } else { - "".to_string() - }; - - // The `icg` flags will be set if ever specified - let ctx = RegexpContext::new(&pattern, &flags)?; - - // Construct the regex used to match and replace `\n` expression - // Check: https://docs.rs/regex/latest/regex/struct.Captures.html#method.expand - let regex = Regex::new(r"\\([1-9])").unwrap(); - - // Get the replaced string - let replacement = regex - .replace_all(&replacement, "$${$1}") - // This is for the '\$' substitution - .replace("\\&", "${0}"); - - Ok(Self { - source, - ctx, - replacement, - return_type, - start, - n, - }) - } -} - -impl RegexpReplaceExpression { - /// Match and replace one row, return the replaced string - fn match_row(&self, text: Option<&str>) -> Option { - if let Some(text) = text { - // The start position to begin the search - let start = if let Some(s) = self.start { s - 1 } else { 0 }; - - // This is 
because the source text may contain unicode - let start = match text.char_indices().nth(start as usize) { - Some((idx, _)) => idx, - // With no match - None => return Some(text.into()), - }; - - if (self.n.is_none() && self.ctx.global) || (self.n.is_some() && self.n.unwrap() == 0) { - // -------------------------------------------------------------- - // `-g` enabled (& `N` is not specified) or `N` is `0` | - // We need to replace all the occurrence of the matched pattern | - // -------------------------------------------------------------- - - // See if there is capture group or not - if self.ctx.regex.captures_len() <= 1 { - // There is no capture groups in the regex - // Just replace all matched patterns after `start` - return Some( - text[..start].to_string() - + &self - .ctx - .regex - .replace_all(&text[start..], self.replacement.clone()), - ); - } else { - // The position to start searching for replacement - let mut search_start = start; - - // Construct the return string - let mut ret = text[..search_start].to_string(); - - // Begin the actual replace logic - while let Some(capture) = self.ctx.regex.captures(&text[search_start..]) { - let match_start = capture.get(0).unwrap().start(); - let match_end = capture.get(0).unwrap().end(); - - if match_start == match_end { - // If this is an empty match - search_start += 1; - continue; - } - - // Append the portion of the text from `search_start` to `match_start` - ret.push_str(&text[search_start..search_start + match_start]); - - // Start to replacing - // Note that the result will be written directly to `ret` buffer - capture.expand(&self.replacement, &mut ret); - - // Update the `search_start` - search_start += match_end; - } - - // Push the rest of the text to return string - ret.push_str(&text[search_start..]); - - Some(ret) - } - } else { - // ------------------------------------------------- - // Only replace the first matched pattern | - // Or the N-th matched pattern if `N` is specified | - // 
------------------------------------------------- - - // Construct the return string - let mut ret = if start > 1 { - text[..start].to_string() - } else { - "".to_string() - }; - - // See if there is capture group or not - if self.ctx.regex.captures_len() <= 1 { - // There is no capture groups in the regex - if self.n.is_none() { - // `N` is not specified - ret.push_str(&self.ctx.regex.replacen( - &text[start..], - 1, - &self.replacement, - )); - } else { - // Replace only the N-th match - let mut count = 1; - // The absolute index for the start of searching - let mut search_start = start; - while let Some(capture) = self.ctx.regex.captures(&text[search_start..]) { - // Get the current start & end index - let match_start = capture.get(0).unwrap().start(); - let match_end = capture.get(0).unwrap().end(); - - if count == self.n.unwrap() as i32 { - // We've reached the pattern to replace - // Let's construct the return string - ret = format!( - "{}{}{}", - &text[..search_start + match_start], - &self.replacement, - &text[search_start + match_end..] - ); - break; - } - - // Update the counter - count += 1; - - // Update `start` - search_start += match_end; - } - } - } else { - // There are capture groups in the regex - // Reset return string at the beginning - ret = "".to_string(); - if self.n.is_none() { - // `N` is not specified - if self.ctx.regex.captures(&text[start..]).is_none() { - // No match - return Some(text.into()); - } - // Otherwise replace the source text - if let Some(capture) = self.ctx.regex.captures(&text[start..]) { - let match_start = capture.get(0).unwrap().start(); - let match_end = capture.get(0).unwrap().end(); - - // Get the replaced string and expand it - capture.expand(&self.replacement, &mut ret); - - // Construct the return string - ret = format!( - "{}{}{}", - &text[..start + match_start], - ret, - &text[start + match_end..] 
- ); - } - } else { - // Replace only the N-th match - let mut count = 1; - while let Some(capture) = self.ctx.regex.captures(&text[start..]) { - if count == self.n.unwrap() as i32 { - // We've reached the pattern to replace - let match_start = capture.get(0).unwrap().start(); - let match_end = capture.get(0).unwrap().end(); - - // Get the replaced string and expand it - capture.expand(&self.replacement, &mut ret); - - // Construct the return string - ret = format!( - "{}{}{}", - &text[..start + match_start], - ret, - &text[start + match_end..] - ); - } - - // Update the counter - count += 1; - } - - // If there is no match, just return the original string - if ret.is_empty() { - ret = text.into(); - } - } - } - - Some(ret) - } - } else { - // The input string is None - // Directly return - None - } - } -} - -#[async_trait::async_trait] -impl Expression for RegexpReplaceExpression { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - async fn eval(&self, input: &DataChunk) -> Result { - // Get the source text column first - let source_column = self.source.eval_checked(input).await?; - let source_column = source_column.as_utf8(); - - let row_len = input.capacity(); - let vis = input.vis(); - let mut builder = Utf8ArrayBuilder::new(row_len); - - for row_idx in 0..row_len { - // If not visible, just append the `None` - if !vis.is_set(row_idx) { - builder.append(None); - continue; - } - - // Try to get the source text for this column - let source = match source_column.value_at(row_idx) { - Some(s) => s, - None => { - builder.append(None); - continue; - } - }; - - if let Some(ret) = self.match_row(Some(source)) { - builder.append(Some(&ret)); - } else { - builder.append(None); - } - } - - Ok(Arc::new(ArrayImpl::from(builder.finish()))) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - // Get the source text to match and replace - let source = self.source.eval_row(input).await?; - let source = match source { - Some(ScalarImpl::Utf8(s)) 
=> s, - // The input source is invalid, directly return None - _ => return Ok(None), - }; - - Ok(self - .match_row(Some(&source)) - .map(|replaced| replaced.into())) - } -} diff --git a/src/expr/src/expr/expr_regexp_count.rs b/src/expr/src/expr/expr_regexp_count.rs deleted file mode 100644 index 28e5b75ff74e9..0000000000000 --- a/src/expr/src/expr/expr_regexp_count.rs +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use risingwave_common::array::{ - Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, I32ArrayBuilder, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::value_encoding::deserialize_datum; -use risingwave_common::{bail, ensure}; -use risingwave_pb::expr::expr_node::{RexNode, Type}; -use risingwave_pb::expr::ExprNode; - -use super::expr_regexp::RegexpContext; -use super::{build_from_prost as expr_build_from_prost, Expression}; -use crate::{ExprError, Result}; - -#[derive(Debug)] -pub struct RegexpCountExpression { - /// The source text - pub source: Box, - /// Relevant regex context, contains `flags` option - pub ctx: RegexpContext, - /// The start position to begin the counting process - pub start: Option, -} - -pub const NULL_PATTERN: &str = "a^"; - -/// This trait provides the transformation from `ExprNode` to `RegexpCountExpression` -impl<'a> TryFrom<&'a ExprNode> for RegexpCountExpression { - type Error = ExprError; - - fn try_from(prost: &'a ExprNode) -> Result { - // Sanity check first - ensure!(prost.get_function_type().unwrap() == Type::RegexpCount); - - let RexNode::FuncCall(func_call_node) = prost.get_rex_node().unwrap() else { - bail!("Expected RexNode::FuncCall"); - }; - - let mut children = func_call_node.children.iter(); - - let Some(source_node) = children.next() else { - bail!("Expected source text"); - }; - let source = expr_build_from_prost(source_node)?; - - let Some(pattern_node) = children.next() else { - bail!("Expected pattern text"); - }; - let pattern = match &pattern_node.get_rex_node()? 
{ - RexNode::Constant(pattern_value) => { - let pattern_datum = deserialize_datum( - pattern_value.get_body().as_slice(), - &DataType::from(pattern_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match pattern_datum { - Some(ScalarImpl::Utf8(pattern)) => pattern.to_string(), - // NULL pattern - None => NULL_PATTERN.to_string(), - _ => bail!("Expected pattern to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant pattern in `regexp_count`".to_string(), - )) - } - }; - - // Parsing for [ , start [, flags ]] - let mut flags: Option = None; - let mut start: Option = None; - - // See if `start` is specified - if let Some(start_node) = children.next() { - start = match &start_node.get_rex_node()? { - RexNode::Constant(start_value) => { - let start_datum = deserialize_datum( - start_value.get_body().as_slice(), - &DataType::from(start_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match start_datum { - Some(ScalarImpl::Int32(start)) => { - if start <= 0 { - bail!("start must greater than zero"); - } - Some(start as u32) - } - _ => bail!("Expected start to be a Unsigned Int32"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant start in `regexp_count`".to_string(), - )) - } - }; - - // See if `flags` is specified - if let Some(flags_node) = children.next() { - flags = match &flags_node.get_rex_node()? 
{ - RexNode::Constant(flags_value) => { - let flags_datum = deserialize_datum( - flags_value.get_body().as_slice(), - &DataType::from(flags_node.get_return_type().unwrap()), - ) - .map_err(|e| ExprError::Internal(e.into()))?; - - match flags_datum { - Some(ScalarImpl::Utf8(flags)) => Some(flags.to_string()), - _ => bail!("Expected flags to be a String"), - } - } - _ => { - return Err(ExprError::UnsupportedFunction( - "non-constant flags in `regexp_count`".to_string(), - )) - } - } - } - }; - - // Sanity check - if children.next().is_some() { - bail!("syntax error in `regexp_count`"); - } - - let flags = flags.unwrap_or_default(); - - if flags.contains('g') { - bail!("`regexp_count` does not support global flag option"); - } - - let ctx = RegexpContext::new(&pattern, &flags)?; - - Ok(Self { source, ctx, start }) - } -} - -impl RegexpCountExpression { - fn match_row(&self, text: Option<&str>) -> Option { - if let Some(text) = text { - // First get the start position to count for - let start = if let Some(s) = self.start { s - 1 } else { 0 }; - - // For unicode purpose - let mut start = match text.char_indices().nth(start as usize) { - Some((idx, _)) => idx, - // The `start` is out of bound - None => return Some(0), - }; - - let mut count = 0; - - while let Some(captures) = self.ctx.regex.captures(&text[start..]) { - count += 1; - start += captures.get(0).unwrap().end(); - } - - Some(count) - } else { - // Input string is None, the return value should be NULL - None - } - } -} - -#[async_trait::async_trait] -impl Expression for RegexpCountExpression { - fn return_type(&self) -> DataType { - DataType::Int32 - } - - async fn eval(&self, input: &DataChunk) -> Result { - let source_column = self.source.eval_checked(input).await?; - let source_column = source_column.as_utf8(); - - let row_len = input.capacity(); - let vis = input.vis(); - let mut builder: I32ArrayBuilder = ArrayBuilder::new(row_len); - - for row_idx in 0..row_len { - if !vis.is_set(row_idx) { - 
builder.append(None); - continue; - } - - let source = source_column.value_at(row_idx); - builder.append(self.match_row(source)); - } - - Ok(Arc::new(ArrayImpl::from(builder.finish()))) - } - - async fn eval_row(&self, input: &OwnedRow) -> Result { - let source = self.source.eval_row(input).await?; - // Will panic if the input text is not a String - let source = match source { - Some(ScalarImpl::Utf8(s)) => s, - None => return Ok(None), - // Other than the above cases - // The input is invalid and we should panic here - _ => bail!("source should be a String"), - }; - - Ok(self - .match_row(Some(&source)) - .map(|replaced| replaced.into())) - } -} diff --git a/src/expr/src/expr/expr_timestamp_to_char_const_tmpl.rs b/src/expr/src/expr/expr_timestamp_to_char_const_tmpl.rs deleted file mode 100644 index 4164dfed44053..0000000000000 --- a/src/expr/src/expr/expr_timestamp_to_char_const_tmpl.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::fmt::Write; -use std::sync::Arc; - -use risingwave_common::array::{Array, ArrayBuilder, TimestampArray, Utf8ArrayBuilder}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::build_function; - -use super::{BoxedExpression, Expression, Result}; -use crate::expr::template::BinaryBytesExpression; -use crate::vector_op::to_char::{compile_pattern_to_chrono, to_char_timestamp, ChronoPattern}; - -#[derive(Debug)] -struct ExprToCharConstTmplContext { - chrono_pattern: ChronoPattern, -} - -#[derive(Debug)] -struct ExprToCharConstTmpl { - child: Box, - ctx: ExprToCharConstTmplContext, -} - -#[async_trait::async_trait] -impl Expression for ExprToCharConstTmpl { - fn return_type(&self) -> DataType { - DataType::Varchar - } - - async fn eval( - &self, - input: &risingwave_common::array::DataChunk, - ) -> crate::Result { - let data_arr = self.child.eval_checked(input).await?; - let data_arr: &TimestampArray = data_arr.as_ref().into(); - let mut output = Utf8ArrayBuilder::new(input.capacity()); - for (data, vis) in data_arr.iter().zip_eq_fast(input.vis().iter()) { - if !vis { - output.append_null(); - } else if let Some(data) = data { - let mut writer = output.writer().begin(); - let fmt = data - .0 - .format_with_items(self.ctx.chrono_pattern.borrow_dependent().iter()); - write!(writer, "{fmt}").unwrap(); - writer.finish(); - } else { - output.append_null(); - } - } - - Ok(Arc::new(output.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> crate::Result { - let data = self.child.eval_row(input).await?; - Ok(if let Some(ScalarImpl::Timestamp(data)) = data { - Some( - data.0 - .format_with_items(self.ctx.chrono_pattern.borrow_dependent().iter()) - .to_string() - .into(), - ) - } else { - None - }) - } -} - -#[build_function("to_char(timestamp, varchar) -> varchar")] -fn build_to_char_expr( - return_type: DataType, - 
children: Vec, -) -> Result { - use risingwave_common::array::*; - - let mut iter = children.into_iter(); - let data_expr = iter.next().unwrap(); - let tmpl_expr = iter.next().unwrap(); - - Ok(if let Ok(Some(tmpl)) = tmpl_expr.eval_const() { - ExprToCharConstTmpl { - ctx: ExprToCharConstTmplContext { - chrono_pattern: compile_pattern_to_chrono(tmpl.as_utf8()), - }, - child: data_expr, - } - .boxed() - } else { - BinaryBytesExpression::::new( - data_expr, - tmpl_expr, - return_type, - #[allow(clippy::unit_arg)] - |a, b, w| Ok(to_char_timestamp(a, b, w)), - ) - .boxed() - }) -} diff --git a/src/expr/src/expr/expr_timestamptz_to_char_const_tmpl.rs b/src/expr/src/expr/expr_timestamptz_to_char_const_tmpl.rs deleted file mode 100644 index d25f68424092a..0000000000000 --- a/src/expr/src/expr/expr_timestamptz_to_char_const_tmpl.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use risingwave_common::array::{Array, ArrayBuilder, TimestamptzArray, Utf8ArrayBuilder}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::build_function; - -use super::{BoxedExpression, Expression, Result}; -use crate::expr::template::TernaryBytesExpression; -use crate::vector_op::to_char::{ - compile_pattern_to_chrono, to_char_timestamptz, to_char_timestamptz_const_tmpl, ChronoPattern, -}; -use crate::ExprError; - -#[derive(Debug)] -struct ExprToCharConstTmplContext { - chrono_pattern: ChronoPattern, - time_zone: Box, -} - -#[derive(Debug)] -struct ExprToCharConstTmpl { - child: Box, - ctx: ExprToCharConstTmplContext, -} - -#[async_trait::async_trait] -impl Expression for ExprToCharConstTmpl { - fn return_type(&self) -> DataType { - DataType::Varchar - } - - async fn eval( - &self, - input: &risingwave_common::array::DataChunk, - ) -> crate::Result { - let data_arr = self.child.eval_checked(input).await?; - let data_arr: &TimestamptzArray = data_arr.as_ref().into(); - let mut output = Utf8ArrayBuilder::new(input.capacity()); - for (data, vis) in data_arr.iter().zip_eq_fast(input.vis().iter()) { - if !vis { - output.append_null(); - } else if let Some(data) = data { - let mut writer = output.writer().begin(); - to_char_timestamptz_const_tmpl( - data, - &self.ctx.chrono_pattern, - &self.ctx.time_zone, - &mut writer, - )?; - writer.finish(); - } else { - output.append_null(); - } - } - - Ok(Arc::new(output.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> crate::Result { - let data = self.child.eval_row(input).await?; - Ok(if let Some(ScalarImpl::Timestamptz(data)) = data { - let mut s = String::new(); - to_char_timestamptz_const_tmpl( - data, - &self.ctx.chrono_pattern, - &self.ctx.time_zone, - &mut s, - )?; - Some(s.into()) - } else { - None - }) - } -} - -// Only to register this 
signature to function signature map. -#[build_function("to_char(timestamptz, varchar) -> varchar")] -fn build_dummy(_return_type: DataType, _children: Vec) -> Result { - Err(ExprError::UnsupportedFunction( - "to_char should have been rewritten to include timezone".into(), - )) -} - -#[build_function("to_char(timestamptz, varchar, varchar) -> varchar")] -fn build_to_char_expr( - return_type: DataType, - children: Vec, -) -> Result { - use risingwave_common::array::*; - - let mut iter = children.into_iter(); - let data_expr = iter.next().unwrap(); - let tmpl_expr = iter.next().unwrap(); - let zone_expr = iter.next().unwrap(); - - Ok(if let Ok(Some(tmpl)) = tmpl_expr.eval_const() - && let Ok(Some(zone)) = zone_expr.eval_const() { - ExprToCharConstTmpl { - ctx: ExprToCharConstTmplContext { - chrono_pattern: compile_pattern_to_chrono(tmpl.as_utf8()), - time_zone: zone.into_utf8(), - }, - child: data_expr, - } - .boxed() - } else { - TernaryBytesExpression::::new( - data_expr, - tmpl_expr, - zone_expr, - return_type, - to_char_timestamptz, - ) - .boxed() - }) -} diff --git a/src/expr/src/expr/expr_to_date_const_tmpl.rs b/src/expr/src/expr/expr_to_date_const_tmpl.rs deleted file mode 100644 index 2da9919886fb4..0000000000000 --- a/src/expr/src/expr/expr_to_date_const_tmpl.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use risingwave_common::array::{Array, ArrayBuilder, DateArrayBuilder, Utf8Array}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::build_function; - -use super::{BoxedExpression, Expression, Result}; -use crate::expr::template::BinaryExpression; -use crate::vector_op::to_char::{compile_pattern_to_chrono, ChronoPattern}; -use crate::vector_op::to_timestamp::{to_date, to_date_const_tmpl}; - -#[derive(Debug)] -struct ExprToDateConstTmpl { - child: Box, - chrono_pattern: ChronoPattern, -} - -#[async_trait::async_trait] -impl Expression for ExprToDateConstTmpl { - fn return_type(&self) -> DataType { - DataType::Date - } - - async fn eval( - &self, - input: &risingwave_common::array::DataChunk, - ) -> crate::Result { - let data_arr = self.child.eval_checked(input).await?; - let data_arr: &Utf8Array = data_arr.as_ref().into(); - let mut output = DateArrayBuilder::new(input.capacity()); - for (data, vis) in data_arr.iter().zip_eq_fast(input.vis().iter()) { - if !vis { - output.append_null(); - } else if let Some(data) = data { - let res = to_date_const_tmpl(data, &self.chrono_pattern)?; - output.append(Some(res)); - } else { - output.append_null(); - } - } - - Ok(Arc::new(output.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> crate::Result { - let data = self.child.eval_row(input).await?; - Ok(if let Some(ScalarImpl::Utf8(data)) = data { - let res = to_date_const_tmpl(&data, &self.chrono_pattern)?; - Some(res.into()) - } else { - None - }) - } -} - -#[build_function("char_to_date(varchar, varchar) -> date")] -fn build_to_date_expr( - return_type: DataType, - children: Vec, -) -> Result { - use risingwave_common::array::*; - - let mut iter = children.into_iter(); - let data_expr = iter.next().unwrap(); - let tmpl_expr = iter.next().unwrap(); - - Ok(if let Ok(Some(tmpl)) = tmpl_expr.eval_const() 
{ - ExprToDateConstTmpl { - child: data_expr, - chrono_pattern: compile_pattern_to_chrono(tmpl.as_utf8()), - } - .boxed() - } else { - BinaryExpression::::new( - data_expr, - tmpl_expr, - return_type, - to_date, - ) - .boxed() - }) -} diff --git a/src/expr/src/expr/expr_to_timestamp_const_tmpl.rs b/src/expr/src/expr/expr_to_timestamp_const_tmpl.rs deleted file mode 100644 index cdbf75f6c0df2..0000000000000 --- a/src/expr/src/expr/expr_to_timestamp_const_tmpl.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use risingwave_common::array::{Array, ArrayBuilder, TimestamptzArrayBuilder, Utf8Array}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, ScalarImpl}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::build_function; - -use super::{BoxedExpression, Expression, Result}; -use crate::expr::template::{BinaryExpression, TernaryExpression}; -use crate::vector_op::to_char::{compile_pattern_to_chrono, ChronoPattern}; -use crate::vector_op::to_timestamp::{to_timestamp, to_timestamp_const_tmpl, to_timestamp_legacy}; -use crate::ExprError; - -#[derive(Debug)] -struct ExprToTimestampConstTmplContext { - chrono_pattern: ChronoPattern, - time_zone: Box, -} - -#[derive(Debug)] -struct ExprToTimestampConstTmpl { - child: Box, - ctx: ExprToTimestampConstTmplContext, -} - -#[async_trait::async_trait] -impl Expression for ExprToTimestampConstTmpl { - fn return_type(&self) -> DataType { - DataType::Timestamptz - } - - async fn eval( - &self, - input: &risingwave_common::array::DataChunk, - ) -> crate::Result { - let data_arr = self.child.eval_checked(input).await?; - let data_arr: &Utf8Array = data_arr.as_ref().into(); - let mut output = TimestamptzArrayBuilder::new(input.capacity()); - for (data, vis) in data_arr.iter().zip_eq_fast(input.vis().iter()) { - if !vis { - output.append_null(); - } else if let Some(data) = data { - let res = - to_timestamp_const_tmpl(data, &self.ctx.chrono_pattern, &self.ctx.time_zone)?; - output.append(Some(res)); - } else { - output.append_null(); - } - } - - Ok(Arc::new(output.finish().into())) - } - - async fn eval_row(&self, input: &OwnedRow) -> crate::Result { - let data = self.child.eval_row(input).await?; - Ok(if let Some(ScalarImpl::Utf8(data)) = data { - let res = - to_timestamp_const_tmpl(&data, &self.ctx.chrono_pattern, &self.ctx.time_zone)?; - Some(res.into()) - } else { - None - }) - } -} - -// Only to register this signature to function signature 
map. -#[build_function("to_timestamp1(varchar, varchar) -> timestamptz")] -fn build_dummy(_return_type: DataType, _children: Vec) -> Result { - Err(ExprError::UnsupportedFunction( - "to_timestamp should have been rewritten to include timezone".into(), - )) -} - -#[build_function("to_timestamp1(varchar, varchar, varchar) -> timestamptz")] -fn build_to_timestamp_expr( - return_type: DataType, - children: Vec, -) -> Result { - use risingwave_common::array::*; - - let mut iter = children.into_iter(); - let data_expr = iter.next().unwrap(); - let tmpl_expr = iter.next().unwrap(); - let zone_expr = iter.next().unwrap(); - - Ok(if let Ok(Some(tmpl)) = tmpl_expr.eval_const() - && let Ok(Some(zone)) = zone_expr.eval_const() { - ExprToTimestampConstTmpl { - ctx: ExprToTimestampConstTmplContext { - chrono_pattern: compile_pattern_to_chrono(tmpl.as_utf8()), - time_zone: zone.into_utf8(), - }, - child: data_expr, - } - .boxed() - } else { - TernaryExpression::::new( - data_expr, - tmpl_expr, - zone_expr, - return_type, - to_timestamp, - ) - .boxed() - }) -} - -/// Support building the variant returning timestamp without time zone for backward compatibility. 
-#[build_function("to_timestamp1(varchar, varchar) -> timestamp", deprecated)] -pub fn build_to_timestamp_expr_legacy( - return_type: DataType, - children: Vec, -) -> Result { - use risingwave_common::array::*; - - let mut iter = children.into_iter(); - let data_expr = iter.next().unwrap(); - let tmpl_expr = iter.next().unwrap(); - - Ok( - BinaryExpression::::new( - data_expr, - tmpl_expr, - return_type, - to_timestamp_legacy, - ) - .boxed(), - ) -} diff --git a/src/expr/src/expr/expr_unary.rs b/src/expr/src/expr/expr_unary.rs deleted file mode 100644 index a286af8177e82..0000000000000 --- a/src/expr/src/expr/expr_unary.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! For expression that only accept one value as input (e.g. 
CAST) - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use risingwave_common::array::*; - use risingwave_common::types::{Date, Scalar}; - use risingwave_pb::expr::expr_node::PbType; - - use super::super::*; - use crate::vector_op::cast::{str_parse, try_cast}; - - #[tokio::test] - async fn test_unary() { - test_unary_bool::(|x| !x, PbType::Not).await; - test_unary_date::(|x| try_cast(x).unwrap(), PbType::Cast).await; - test_str_to_int16::(|x| str_parse(x).unwrap()).await; - } - - #[tokio::test] - async fn test_i16_to_i32() { - let mut input = Vec::>::new(); - let mut target = Vec::>::new(); - for i in 0..100i16 { - if i % 2 == 0 { - target.push(Some(i as i32)); - input.push(Some(i)); - } else { - input.push(None); - target.push(None); - } - } - let col1 = I16Array::from_iter(&input).into_ref(); - let data_chunk = DataChunk::new(vec![col1], 100); - let expr = build_from_pretty("(cast:int4 $0:int2)"); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &I32Array = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..input.len() { - let row = OwnedRow::new(vec![input[i].map(|int| int.to_scalar_value())]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].map(|int| int.to_scalar_value()); - assert_eq!(result, expected); - } - } - - #[tokio::test] - async fn test_neg() { - let input = [Some(1), Some(0), Some(-1)]; - let target = [Some(-1), Some(0), Some(1)]; - - let col1 = I32Array::from_iter(&input).into_ref(); - let data_chunk = DataChunk::new(vec![col1], 3); - let expr = build_from_pretty("(neg:int4 $0:int4)"); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &I32Array = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..input.len() { - let row = 
OwnedRow::new(vec![input[i].map(|int| int.to_scalar_value())]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].map(|int| int.to_scalar_value()); - assert_eq!(result, expected); - } - } - - async fn test_str_to_int16(f: F) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(&str) -> ::OwnedItem, - { - let mut input = Vec::>>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..1u32 { - if i % 2 == 0 { - let s = i.to_string().into_boxed_str(); - target.push(Some(f(&s))); - input.push(Some(s)); - } else { - input.push(None); - target.push(None); - } - } - let col1_data = &input.iter().map(|x| x.as_ref().map(|x| &**x)).collect_vec(); - let col1 = Utf8Array::from_iter(col1_data).into_ref(); - let data_chunk = DataChunk::new(vec![col1], 1); - let expr = build_from_pretty("(cast:int2 $0:varchar)"); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..input.len() { - let row = OwnedRow::new(vec![input[i] - .as_ref() - .cloned() - .map(|str| str.to_scalar_value())]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, expected); - } - } - - async fn test_unary_bool(f: F, kind: PbType) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(bool) -> ::OwnedItem, - { - let mut input = Vec::>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..100 { - if i % 2 == 0 { - input.push(Some(true)); - target.push(Some(f(true))); - } else if i % 3 == 0 { - input.push(Some(false)); - target.push(Some(f(false))); - } else { - input.push(None); - target.push(None); - } - } - - let col1 = BoolArray::from_iter(&input).into_ref(); - 
let data_chunk = DataChunk::new(vec![col1], 100); - let expr = build_from_pretty(format!("({kind:?}:boolean $0:boolean)")); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..input.len() { - let row = OwnedRow::new(vec![input[i].map(|b| b.to_scalar_value())]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, expected); - } - } - - async fn test_unary_date(f: F, kind: PbType) - where - A: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> ::RefItem<'a>: PartialEq, - F: Fn(Date) -> ::OwnedItem, - { - let mut input = Vec::>::new(); - let mut target = Vec::::OwnedItem>>::new(); - for i in 0..100 { - if i % 2 == 0 { - let date = Date::from_num_days_from_ce_uncheck(i); - input.push(Some(date)); - target.push(Some(f(date))); - } else { - input.push(None); - target.push(None); - } - } - - let col1 = DateArray::from_iter(&input).into_ref(); - let data_chunk = DataChunk::new(vec![col1], 100); - let expr = build_from_pretty(format!("({kind:?}:timestamp $0:date)")); - let res = expr.eval(&data_chunk).await.unwrap(); - let arr: &A = res.as_ref().into(); - for (idx, item) in arr.iter().enumerate() { - let x = target[idx].as_ref().map(|x| x.as_scalar_ref()); - assert_eq!(x, item); - } - - for i in 0..input.len() { - let row = OwnedRow::new(vec![input[i].map(|d| d.to_scalar_value())]); - let result = expr.eval_row(&row).await.unwrap(); - let expected = target[i].as_ref().cloned().map(|x| x.to_scalar_value()); - assert_eq!(result, expected); - } - } -} diff --git a/src/expr/src/expr/template.rs b/src/expr/src/expr/template.rs deleted file mode 100644 index a2d0535b90052..0000000000000 --- a/src/expr/src/expr/template.rs +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2023 RisingWave Labs 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Template macro to generate code for unary/binary/ternary expression. - -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; - -use itertools::{multizip, Itertools}; -use paste::paste; -use risingwave_common::array::{Array, ArrayBuilder, ArrayImpl, ArrayRef, DataChunk, Utf8Array}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{option_as_scalar_ref, DataType, Datum, Scalar}; -use risingwave_common::util::iter_util::ZipEqDebug; - -use crate::expr::{BoxedExpression, Expression, ValueImpl, ValueRef}; -use crate::Result; - -macro_rules! gen_eval { - { ($macro:ident, $macro_row:ident), $ty_name:ident, $OA:ty, $($arg:ident,)* } => { - fn eval_v2<'a, 'b, 'async_trait>(&'a self, data_chunk: &'b DataChunk) - -> Pin> + Send + 'async_trait>> - where - 'a: 'async_trait, - 'b: 'async_trait, - { - Box::pin(async move { paste! { - $( - let [] = self.[].eval_v2(data_chunk).await?; - let []: ValueRef<'_, $arg> = (&[]).into(); - )* - - Ok(match ($([], )*) { - // If all arguments are scalar, we can directly compute the result. 
- ($(ValueRef::Scalar { value: [], capacity: [] }, )*) => { - let output_scalar = $macro_row!(self, $([],)*); - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - let capacity = data_chunk.capacity(); - - if cfg!(debug_assertions) { - let all_capacities = [capacity, $([], )*]; - assert!(all_capacities.into_iter().all_equal(), "capacities mismatched: {:?}", all_capacities); - } - - ValueImpl::Scalar { value: output_datum, capacity } - } - - // Otherwise, fallback to array computation. - ($([], )*) => { - let bitmap = data_chunk.visibility(); - let mut output_array = <$OA as Array>::Builder::with_type(data_chunk.capacity(), self.return_type.clone()); - let array = match bitmap { - Some(bitmap) => { - // TODO: use `izip` here. - for (($([], )*), visible) in multizip(($([].iter(), )*)).zip_eq_debug(bitmap.iter()) { - if !visible { - output_array.append_null(); - continue; - } - $macro!(self, output_array, $([],)*) - } - output_array.finish().into() - } - None => { - // TODO: use `izip` here. - for ($([], )*) in multizip(($([].iter(), )*)) { - $macro!(self, output_array, $([],)*) - } - output_array.finish().into() - } - }; - - ValueImpl::Array(Arc::new(array)) - } - }) - }}) - } - - /// `eval_row()` first calls `eval_row()` on the inner expressions to get the resulting datums, - /// then directly calls `$macro_row` to evaluate the current expression. - fn eval_row<'a, 'b, 'async_trait>(&'a self, row: &'b OwnedRow) - -> Pin> + Send + 'async_trait>> - where - 'a: 'async_trait, - 'b: 'async_trait, - { - Box::pin(async move { paste! { - $( - let [] = self.[].eval_row(row).await?; - let [] = [].as_ref().map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - )* - - let output_scalar = $macro_row!(self, $([],)*); - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - }}) - } - } -} - -macro_rules! 
eval_normal { - ($self:ident, $output_array:ident, $($arg:ident,)*) => { - if let ($(Some($arg), )*) = ($($arg, )*) { - let ret = ($self.func)($($arg, )*)?; - let output = Some(ret.as_scalar_ref()); - $output_array.append(output); - } else { - $output_array.append(None); - } - } -} -macro_rules! eval_normal_row { - ($self:ident, $($arg:ident,)*) => { - if let ($(Some($arg), )*) = ($($arg, )*) { - let ret = ($self.func)($($arg, )*)?; - Some(ret) - } else { - None - } - } -} - -macro_rules! gen_expr_normal { - ($ty_name:ident, { $($arg:ident),* }) => { - paste! { - pub struct $ty_name< - $($arg: Array, )* - OA: Array, - F: Fn($($arg::RefItem<'_>, )*) -> Result, - > { - $([]: BoxedExpression,)* - return_type: DataType, - func: F, - _phantom: std::marker::PhantomData<($($arg, )* OA)>, - } - - impl<$($arg: Array, )* - OA: Array, - F: Fn($($arg::RefItem<'_>, )*) -> Result + Sync + Send, - > fmt::Debug for $ty_name<$($arg, )* OA, F> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!($ty_name)) - .field("func", &std::any::type_name::()) - $(.field(stringify!([]), &self.[]))* - .field("return_type", &self.return_type) - .finish() - } - } - - impl<$($arg: Array, )* - OA: Array, - F: Fn($($arg::RefItem<'_>, )*) -> Result + Sync + Send, - > Expression for $ty_name<$($arg, )* OA, F> - where - $(for<'a> ValueRef<'a, $arg>: std::convert::From<&'a ValueImpl>,)* - for<'a> ValueRef<'a, OA>: std::convert::From<&'a ValueImpl>, - { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - gen_eval! { (eval_normal, eval_normal_row), $ty_name, OA, $($arg, )* } - } - - impl<$($arg: Array, )* - OA: Array, - F: Fn($($arg::RefItem<'_>, )*) -> Result + Sync + Send, - > $ty_name<$($arg, )* OA, F> { - #[allow(dead_code)] - pub fn new( - $([]: BoxedExpression, )* - return_type: DataType, - func: F, - ) -> Self { - Self { - $([], )* - return_type, - func, - _phantom : std::marker::PhantomData, - } - } - } - } - } -} - -macro_rules! 
eval_bytes { - ($self:ident, $output_array:ident, $($arg:ident,)*) => { - if let ($(Some($arg), )*) = ($($arg, )*) { - { - let mut writer = $output_array.writer().begin(); - ($self.func)($($arg, )* &mut writer)?; - writer.finish(); - } - } else { - $output_array.append(None); - } - } -} -macro_rules! eval_bytes_row { - ($self:ident, $($arg:ident,)*) => { - if let ($(Some($arg), )*) = ($($arg, )*) { - let mut writer = String::new(); - ($self.func)($($arg, )* &mut writer)?; - Some(Box::::from(writer)) - } else { - None - } - } -} - -macro_rules! gen_expr_bytes { - ($ty_name:ident, { $($arg:ident),* }) => { - paste! { - pub struct $ty_name< - $($arg: Array, )* - F: Fn($($arg::RefItem<'_>, )* &mut dyn std::fmt::Write) -> Result<()>, - > { - $([]: BoxedExpression,)* - return_type: DataType, - func: F, - _phantom: std::marker::PhantomData<($($arg, )*)>, - } - - impl<$($arg: Array, )* - F: Fn($($arg::RefItem<'_>, )* &mut dyn std::fmt::Write) -> Result<()> + Sync + Send, - > fmt::Debug for $ty_name<$($arg, )* F> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!($ty_name)) - .field("func", &std::any::type_name::()) - $(.field(stringify!([]), &self.[]))* - .field("return_type", &self.return_type) - .finish() - } - } - - impl<$($arg: Array, )* - F: Fn($($arg::RefItem<'_>, )* &mut dyn std::fmt::Write) -> Result<()> + Sync + Send, - > Expression for $ty_name<$($arg, )* F> - where - $(for<'a> ValueRef<'a, $arg>: std::convert::From<&'a ValueImpl>,)* - { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - gen_eval! 
{ (eval_bytes, eval_bytes_row), $ty_name, Utf8Array, $($arg, )* } - } - - impl<$($arg: Array, )* - F: Fn($($arg::RefItem<'_>, )* &mut dyn std::fmt::Write) -> Result<()> + Sync + Send, - > $ty_name<$($arg, )* F> { - pub fn new( - $([]: BoxedExpression, )* - return_type: DataType, - func: F, - ) -> Self { - Self { - $([], )* - return_type, - func, - _phantom: std::marker::PhantomData, - } - } - } - } - } -} - -macro_rules! eval_nullable { - ($self:ident, $output_array:ident, $($arg:ident,)*) => { - { - let ret = ($self.func)($($arg,)*)?; - $output_array.append(option_as_scalar_ref(&ret)); - } - } -} -macro_rules! eval_nullable_row { - ($self:ident, $($arg:ident,)*) => { - ($self.func)($($arg,)*)? - } -} - -macro_rules! gen_expr_nullable { - ($ty_name:ident, { $($arg:ident),* }) => { - paste! { - pub struct $ty_name< - $($arg: Array, )* - OA: Array, - F: Fn($(Option<$arg::RefItem<'_>>, )*) -> Result>, - > { - $([]: BoxedExpression,)* - return_type: DataType, - func: F, - _phantom: std::marker::PhantomData<($($arg, )* OA)>, - } - - impl<$($arg: Array, )* - OA: Array, - F: Fn($(Option<$arg::RefItem<'_>>, )*) -> Result> + Sync + Send, - > fmt::Debug for $ty_name<$($arg, )* OA, F> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!($ty_name)) - .field("func", &std::any::type_name::()) - $(.field(stringify!([]), &self.[]))* - .field("return_type", &self.return_type) - .finish() - } - } - - #[async_trait::async_trait] - impl<$($arg: Array, )* - OA: Array, - F: Fn($(Option<$arg::RefItem<'_>>, )*) -> Result> + Sync + Send, - > Expression for $ty_name<$($arg, )* OA, F> - where - $(for<'a> ValueRef<'a, $arg>: std::convert::From<&'a ValueImpl>,)* - for<'a> ValueRef<'a, OA>: std::convert::From<&'a ValueImpl>, - { - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - gen_eval! 
{ (eval_nullable, eval_nullable_row), $ty_name, OA, $($arg, )* } - } - - impl<$($arg: Array, )* - OA: Array, - F: Fn($(Option<$arg::RefItem<'_>>, )*) -> Result> + Sync + Send, - > $ty_name<$($arg, )* OA, F> { - // Compile failed due to some GAT lifetime issues so make this field private. - // Check issues #742. - #[allow(dead_code)] - pub fn new( - $([]: BoxedExpression, )* - return_type: DataType, - func: F, - ) -> Self { - Self { - $([], )* - return_type, - func, - _phantom: std::marker::PhantomData, - } - } - } - } - } -} - -gen_expr_normal!(UnaryExpression, { IA1 }); -gen_expr_normal!(BinaryExpression, { IA1, IA2 }); -gen_expr_normal!(TernaryExpression, { IA1, IA2, IA3 }); - -gen_expr_bytes!(UnaryBytesExpression, { IA1 }); -gen_expr_bytes!(BinaryBytesExpression, { IA1, IA2 }); -gen_expr_bytes!(TernaryBytesExpression, { IA1, IA2, IA3 }); -gen_expr_bytes!(QuaternaryBytesExpression, { IA1, IA2, IA3, IA4 }); - -gen_expr_nullable!(UnaryNullableExpression, { IA1 }); -gen_expr_nullable!(BinaryNullableExpression, { IA1, IA2 }); -gen_expr_nullable!(TernaryNullableExpression, { IA1, IA2, IA3 }); - -pub struct NullaryExpression { - return_type: DataType, - func: F, - _phantom: std::marker::PhantomData, -} - -impl Result + Sync + Send> fmt::Debug - for NullaryExpression -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NullaryExpression") - .field("func", &std::any::type_name::()) - .field("return_type", &self.return_type) - .finish() - } -} - -impl Result + Sync + Send> NullaryExpression { - #[allow(dead_code)] - pub fn new(return_type: DataType, func: F) -> Self { - Self { - return_type, - func, - _phantom: std::marker::PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Result + Sync + Send> Expression - for NullaryExpression -where - for<'a> &'a OA: std::convert::From<&'a ArrayImpl>, -{ - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - async fn eval(&self, data_chunk: &DataChunk) -> Result { - let bitmap = 
data_chunk.visibility(); - let mut output_array = - OA::Builder::with_type(data_chunk.capacity(), self.return_type.clone()); - - match bitmap { - Some(bitmap) => { - for visible in bitmap.iter() { - if !visible { - output_array.append_null(); - continue; - } - let ret = (self.func)()?; - let output = Some(ret.as_scalar_ref()); - output_array.append(output); - } - } - None => { - for _ in 0..data_chunk.capacity() { - let ret = (self.func)()?; - let output = Some(ret.as_scalar_ref()); - output_array.append(output); - } - } - } - Ok(Arc::new(output_array.finish().into())) - } - - async fn eval_row(&self, _: &OwnedRow) -> Result { - let ret = (self.func)()?; - let output_datum = Some(ret.to_scalar_value()); - Ok(output_datum) - } -} diff --git a/src/expr/src/expr/template_fast.rs b/src/expr/src/expr/template_fast.rs deleted file mode 100644 index 971481a47efbf..0000000000000 --- a/src/expr/src/expr/template_fast.rs +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Generic expressions for fast evaluation. -//! -//! Expressions in this module utilize auto-vectorization (SIMD) to speed up evaluation. -//! -//! It contains: -//! - [`BooleanUnaryExpression`] for boolean operations, like `not`. -//! - [`BooleanBinaryExpression`] for boolean comparisons, like `eq`. -//! - [`UnaryExpression`] for unary operations on [`PrimitiveArray`], like `bitwise_not`. -//! 
- [`BinaryExpression`] for binary operations on [`PrimitiveArray`], like `bitwise_and`. -//! - [`CompareExpression`] for comparisons on [`PrimitiveArray`], like `eq`. -//! - [`IsDistinctFromExpression`] for `is[_not]_distinct_from` on [`PrimitiveArray`]. -//! -//! Note that to enable vectorization, operations must be applied to every element in the array, -//! without any branching. So it is only suitable for infallible operations. - -// allow using `zip` for performance reasons -#![allow(clippy::disallowed_methods)] - -use std::fmt; -use std::marker::PhantomData; -use std::sync::Arc; - -use risingwave_common::array::{ - Array, ArrayImpl, ArrayRef, BoolArray, DataChunk, PrimitiveArray, PrimitiveArrayItemType, -}; -use risingwave_common::buffer::Bitmap; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, Datum, Scalar}; - -use super::{BoxedExpression, Expression}; - -pub struct BooleanUnaryExpression { - child: BoxedExpression, - f_array: FA, - f_value: FV, -} - -impl fmt::Debug for BooleanUnaryExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BooleanUnaryExpression") - .field("child", &self.child) - .finish() - } -} - -impl BooleanUnaryExpression -where - FA: Fn(&BoolArray) -> BoolArray + Send + Sync, - FV: Fn(Option) -> Option + Send + Sync, -{ - pub fn new(child: BoxedExpression, f_array: FA, f_value: FV) -> Self { - BooleanUnaryExpression { - child, - f_array, - f_value, - } - } -} - -#[async_trait::async_trait] -impl Expression for BooleanUnaryExpression -where - FA: Fn(&BoolArray) -> BoolArray + Send + Sync, - FV: Fn(Option) -> Option + Send + Sync, -{ - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let child = self.child.eval_checked(data_chunk).await?; - let a = child.as_bool(); - let c = (self.f_array)(a); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let 
datum = self.child.eval_row(row).await?; - let scalar = datum.map(|s| *s.as_bool()); - let output_scalar = (self.f_value)(scalar); - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - } -} - -pub struct BooleanBinaryExpression { - left: BoxedExpression, - right: BoxedExpression, - f_array: FA, - f_value: FV, -} - -impl fmt::Debug for BooleanBinaryExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BooleanBinaryExpression") - .field("left", &self.left) - .field("right", &self.right) - .finish() - } -} - -impl BooleanBinaryExpression -where - FA: Fn(&BoolArray, &BoolArray) -> BoolArray + Send + Sync, - FV: Fn(Option, Option) -> Option + Send + Sync, -{ - pub fn new(left: BoxedExpression, right: BoxedExpression, f_array: FA, f_value: FV) -> Self { - BooleanBinaryExpression { - left, - right, - f_array, - f_value, - } - } -} - -#[async_trait::async_trait] -impl Expression for BooleanBinaryExpression -where - FA: Fn(&BoolArray, &BoolArray) -> BoolArray + Send + Sync, - FV: Fn(Option, Option) -> Option + Send + Sync, -{ - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let left = self.left.eval_checked(data_chunk).await?; - let right = self.right.eval_checked(data_chunk).await?; - let a = left.as_bool(); - let b = right.as_bool(); - let c = (self.f_array)(a, b); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let left = self.left.eval_row(row).await?.map(|s| *s.as_bool()); - let right = self.right.eval_row(row).await?.map(|s| *s.as_bool()); - let output_scalar = (self.f_value)(left, right); - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - } -} - -pub struct NullaryExpression { - return_type: DataType, - func: F, - _marker: PhantomData, -} - -impl fmt::Debug for NullaryExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { - f.debug_struct("NullaryExpression").finish() - } -} - -impl NullaryExpression -where - F: Fn() -> T + Send + Sync, - T: PrimitiveArrayItemType, -{ - #[allow(dead_code)] - pub fn new(return_type: DataType, func: F) -> Self { - NullaryExpression { - return_type, - func, - _marker: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Expression for NullaryExpression -where - F: Fn() -> T + Send + Sync, - T: PrimitiveArrayItemType, -{ - fn return_type(&self) -> DataType { - self.return_type.clone() - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let bitmap = match data_chunk.visibility() { - Some(vis) => vis.clone(), - None => Bitmap::ones(data_chunk.capacity()), - }; - let c = PrimitiveArray::::from_iter_bitmap( - std::iter::repeat_with(|| (self.func)()).take(data_chunk.capacity()), - bitmap, - ); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, _row: &OwnedRow) -> crate::Result { - let output_scalar = (self.func)(); - let output_datum = Some(output_scalar.to_scalar_value()); - Ok(output_datum) - } -} - -pub struct UnaryExpression { - child: BoxedExpression, - return_type: DataType, - func: F, - _marker: PhantomData<(A, T)>, -} - -impl fmt::Debug for UnaryExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("UnaryExpression") - .field("child", &self.child) - .finish() - } -} - -impl UnaryExpression -where - F: Fn(A) -> T + Send + Sync, - A: PrimitiveArrayItemType, - T: PrimitiveArrayItemType, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, -{ - pub fn new(child: BoxedExpression, return_type: DataType, func: F) -> Self { - UnaryExpression { - child, - return_type, - func, - _marker: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Expression for UnaryExpression -where - F: Fn(A) -> T + Send + Sync, - A: PrimitiveArrayItemType, - T: PrimitiveArrayItemType, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, -{ - fn return_type(&self) -> DataType { - 
self.return_type.clone() - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let child = self.child.eval_checked(data_chunk).await?; - - let bitmap = match data_chunk.visibility() { - Some(vis) => vis & child.null_bitmap(), - None => child.null_bitmap().clone(), - }; - let a: &PrimitiveArray = (&*child).into(); - let c = PrimitiveArray::::from_iter_bitmap(a.raw_iter().map(|a| (self.func)(a)), bitmap); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let datum = self.child.eval_row(row).await?; - let scalar = datum - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - - let output_scalar = scalar.map(&self.func); - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - } -} - -pub struct BinaryExpression { - left: BoxedExpression, - right: BoxedExpression, - return_type: DataType, - func: F, - _marker: PhantomData<(A, B, T)>, -} - -impl fmt::Debug for BinaryExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BinaryExpression") - .field("left", &self.left) - .field("right", &self.right) - .finish() - } -} - -impl BinaryExpression -where - F: Fn(A, B) -> T + Send + Sync, - A: PrimitiveArrayItemType, - B: PrimitiveArrayItemType, - T: PrimitiveArrayItemType, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, -{ - pub fn new( - left: BoxedExpression, - right: BoxedExpression, - return_type: DataType, - func: F, - ) -> Self { - BinaryExpression { - left, - right, - return_type, - func, - _marker: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Expression for BinaryExpression -where - F: Fn(A, B) -> T + Send + Sync, - A: PrimitiveArrayItemType, - B: PrimitiveArrayItemType, - T: PrimitiveArrayItemType, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, - for<'a> &'a PrimitiveArray: From<&'a ArrayImpl>, -{ - fn return_type(&self) -> DataType { - 
self.return_type.clone() - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let left = self.left.eval_checked(data_chunk).await?; - let right = self.right.eval_checked(data_chunk).await?; - assert_eq!(left.len(), right.len()); - - let mut bitmap = match data_chunk.visibility() { - Some(vis) => vis.clone(), - None => Bitmap::ones(data_chunk.capacity()), - }; - bitmap &= left.null_bitmap(); - bitmap &= right.null_bitmap(); - let a: &PrimitiveArray = (&*left).into(); - let b: &PrimitiveArray = (&*right).into(); - let c = PrimitiveArray::::from_iter_bitmap( - a.raw_iter() - .zip(b.raw_iter()) - .map(|(a, b)| (self.func)(a, b)), - bitmap, - ); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let datum1 = self.left.eval_row(row).await?; - let datum2 = self.right.eval_row(row).await?; - let scalar1 = datum1 - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - let scalar2 = datum2 - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - - let output_scalar = match (scalar1, scalar2) { - (Some(l), Some(r)) => Some((self.func)(l, r)), - _ => None, - }; - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - } -} - -// Basically the same as `BinaryExpression`, but output the `BoolArray`. 
-pub struct CompareExpression { - left: BoxedExpression, - right: BoxedExpression, - func: F, - _marker: PhantomData<(A, B)>, -} - -impl fmt::Debug for CompareExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CompareExpression") - .field("left", &self.left) - .field("right", &self.right) - .finish() - } -} - -impl CompareExpression -where - F: Fn(A::RefItem<'_>, B::RefItem<'_>) -> bool + Send + Sync, - A: Array, - B: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> &'a B: std::convert::From<&'a ArrayImpl>, -{ - pub fn new(left: BoxedExpression, right: BoxedExpression, func: F) -> Self { - CompareExpression { - left, - right, - func, - _marker: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Expression for CompareExpression -where - F: Fn(A::RefItem<'_>, B::RefItem<'_>) -> bool + Send + Sync, - A: Array, - B: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> &'a B: std::convert::From<&'a ArrayImpl>, -{ - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let left = self.left.eval_checked(data_chunk).await?; - let right = self.right.eval_checked(data_chunk).await?; - assert_eq!(left.len(), right.len()); - - let mut bitmap = match data_chunk.visibility() { - Some(vis) => vis.clone(), - None => Bitmap::ones(data_chunk.capacity()), - }; - bitmap &= left.null_bitmap(); - bitmap &= right.null_bitmap(); - let a: &A = (&*left).into(); - let b: &B = (&*right).into(); - let c = BoolArray::new( - a.raw_iter() - .zip(b.raw_iter()) - .map(|(a, b)| (self.func)(a, b)) - .collect(), - bitmap, - ); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let datum1 = self.left.eval_row(row).await?; - let datum2 = self.right.eval_row(row).await?; - let scalar1 = datum1 - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - let scalar2 = datum2 - .as_ref() - 
.map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - - let output_scalar = match (scalar1, scalar2) { - (Some(l), Some(r)) => Some((self.func)(l, r)), - _ => None, - }; - let output_datum = output_scalar.map(|s| s.to_scalar_value()); - Ok(output_datum) - } -} - -pub struct IsDistinctFromExpression { - left: BoxedExpression, - right: BoxedExpression, - ne: F, - not: bool, - _marker: PhantomData<(A, B)>, -} - -impl fmt::Debug for IsDistinctFromExpression { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("IsDistinctFromExpression") - .field("left", &self.left) - .field("right", &self.right) - .finish() - } -} - -impl IsDistinctFromExpression -where - F: Fn(A::RefItem<'_>, B::RefItem<'_>) -> bool + Send + Sync, - A: Array, - B: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> &'a B: std::convert::From<&'a ArrayImpl>, -{ - #[allow(dead_code)] - pub fn new(left: BoxedExpression, right: BoxedExpression, ne: F, not: bool) -> Self { - IsDistinctFromExpression { - left, - right, - ne, - not, - _marker: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl Expression for IsDistinctFromExpression -where - F: Fn(A::RefItem<'_>, B::RefItem<'_>) -> bool + Send + Sync, - A: Array, - B: Array, - for<'a> &'a A: std::convert::From<&'a ArrayImpl>, - for<'a> &'a B: std::convert::From<&'a ArrayImpl>, -{ - fn return_type(&self) -> DataType { - DataType::Boolean - } - - async fn eval(&self, data_chunk: &DataChunk) -> crate::Result { - let left = self.left.eval_checked(data_chunk).await?; - let right = self.right.eval_checked(data_chunk).await?; - assert_eq!(left.len(), right.len()); - - let a: &A = (&*left).into(); - let b: &B = (&*right).into(); - - let mut data: Bitmap = a - .raw_iter() - .zip(b.raw_iter()) - .map(|(a, b)| (self.ne)(a, b)) - .collect(); - data &= left.null_bitmap(); - data &= right.null_bitmap(); - data |= left.null_bitmap() ^ right.null_bitmap(); - if self.not { - data = !data; - } - let c = 
BoolArray::new(data, Bitmap::ones(a.len())); - Ok(Arc::new(c.into())) - } - - async fn eval_row(&self, row: &OwnedRow) -> crate::Result { - let datum1 = self.left.eval_row(row).await?; - let datum2 = self.right.eval_row(row).await?; - let scalar1 = datum1 - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - let scalar2 = datum2 - .as_ref() - .map(|s| s.as_scalar_ref_impl().try_into().unwrap()); - - let output_scalar = match (scalar1, scalar2) { - (Some(l), Some(r)) => (self.ne)(l, r), - (Some(_), None) | (None, Some(_)) => true, - (None, None) => false, - } ^ self.not; - Ok(Some(output_scalar.to_scalar_value())) - } -} diff --git a/src/expr/src/sig/agg.rs b/src/expr/src/sig/agg.rs deleted file mode 100644 index a5321ce726aae..0000000000000 --- a/src/expr/src/sig/agg.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; -use std::fmt; -use std::sync::LazyLock; - -use risingwave_common::types::DataTypeName; - -use super::FuncSigDebug; -use crate::agg::{AggCall, AggKind, BoxedAggregateFunction}; -use crate::Result; - -pub static AGG_FUNC_SIG_MAP: LazyLock = LazyLock::new(|| unsafe { - let mut map = AggFuncSigMap::default(); - tracing::info!("{} aggregations loaded.", AGG_FUNC_SIG_MAP_INIT.len()); - for desc in AGG_FUNC_SIG_MAP_INIT.drain(..) 
{ - map.insert(desc); - } - map -}); - -// Same as FuncSign in func.rs except this is for aggregate function -#[derive(PartialEq, Eq, Hash, Clone)] -pub struct AggFuncSig { - pub func: AggKind, - pub inputs_type: &'static [DataTypeName], - pub ret_type: DataTypeName, - pub build: fn(agg: &AggCall) -> Result, -} - -impl fmt::Debug for AggFuncSig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - FuncSigDebug { - func: self.func, - inputs_type: self.inputs_type, - ret_type: self.ret_type, - set_returning: false, - deprecated: false, - } - .fmt(f) - } -} - -// Same as FuncSigMap in func.rs except this is for aggregate function -#[derive(Default)] -pub struct AggFuncSigMap(HashMap<(AggKind, usize), Vec>); - -impl AggFuncSigMap { - fn insert(&mut self, sig: AggFuncSig) { - let arity = sig.inputs_type.len(); - self.0.entry((sig.func, arity)).or_default().push(sig); - } - - /// Returns a function signature with the given type, argument types and return type. - pub fn get( - &self, - ty: AggKind, - args: &[DataTypeName], - ret: DataTypeName, - ) -> Option<&AggFuncSig> { - let v = self.0.get(&(ty, args.len()))?; - v.iter() - .find(|d| d.inputs_type == args && d.ret_type == ret) - } - - /// Returns the return type for the given function and arguments. - pub fn get_return_type(&self, ty: AggKind, args: &[DataTypeName]) -> Option { - let v = self.0.get(&(ty, args.len()))?; - v.iter().find(|d| d.inputs_type == args).map(|d| d.ret_type) - } -} - -/// The table of function signatures. -pub fn agg_func_sigs() -> impl Iterator { - AGG_FUNC_SIG_MAP.0.values().flatten() -} - -/// Register a function into global registry. -/// -/// # Safety -/// -/// This function must be called sequentially. -/// -/// It is designed to be used by `#[aggregate]` macro. -/// Users SHOULD NOT call this function. -#[doc(hidden)] -pub unsafe fn _register(desc: AggFuncSig) { - AGG_FUNC_SIG_MAP_INIT.push(desc); -} - -/// The global registry of function signatures on initialization. 
-/// -/// `#[aggregate]` macro will generate a `#[ctor]` function to register the signature into this -/// vector. The calls are guaranteed to be sequential. The vector will be drained and moved into -/// `AGG_FUNC_SIG_MAP` on the first access of `AGG_FUNC_SIG_MAP`. -static mut AGG_FUNC_SIG_MAP_INIT: Vec = Vec::new(); diff --git a/src/expr/src/sig/func.rs b/src/expr/src/sig/func.rs deleted file mode 100644 index 5dca4da2f4486..0000000000000 --- a/src/expr/src/sig/func.rs +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Function signatures. - -use std::collections::HashMap; -use std::fmt; -use std::sync::LazyLock; - -use risingwave_common::types::{DataType, DataTypeName}; -use risingwave_pb::expr::expr_node::PbType; - -use super::FuncSigDebug; -use crate::error::Result; -use crate::expr::BoxedExpression; - -pub static FUNC_SIG_MAP: LazyLock = LazyLock::new(|| unsafe { - let mut map = FuncSigMap::default(); - tracing::info!("{} function signatures loaded.", FUNC_SIG_MAP_INIT.len()); - for desc in FUNC_SIG_MAP_INIT.drain(..) { - map.insert(desc); - } - map -}); - -/// The table of function signatures. -pub fn func_sigs() -> impl Iterator { - FUNC_SIG_MAP.0.values().flatten() -} - -#[derive(Default, Clone, Debug)] -pub struct FuncSigMap(HashMap<(PbType, usize), Vec>); - -impl FuncSigMap { - /// Inserts a function signature. 
- pub fn insert(&mut self, desc: FuncSign) { - self.0 - .entry((desc.func, desc.inputs_type.len())) - .or_default() - .push(desc) - } - - /// Returns a function signature with the same type, argument types and return type. - /// Deprecated functions are included. - pub fn get(&self, ty: PbType, args: &[DataTypeName], ret: DataTypeName) -> Option<&FuncSign> { - let v = self.0.get(&(ty, args.len()))?; - v.iter() - .find(|d| d.inputs_type == args && d.ret_type == ret) - } - - /// Returns all function signatures with the same type and number of arguments. - /// Deprecated functions are excluded. - pub fn get_with_arg_nums(&self, ty: PbType, nargs: usize) -> Vec<&FuncSign> { - match self.0.get(&(ty, nargs)) { - Some(v) => v.iter().filter(|d| !d.deprecated).collect(), - None => vec![], - } - } -} - -/// A function signature. -#[derive(Clone)] -pub struct FuncSign { - pub func: PbType, - pub inputs_type: &'static [DataTypeName], - pub ret_type: DataTypeName, - pub build: fn(return_type: DataType, children: Vec) -> Result, - /// Whether the function is deprecated and should not be used in the frontend. - /// For backward compatibility, it is still available in the backend. - pub deprecated: bool, -} - -impl fmt::Debug for FuncSign { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - FuncSigDebug { - func: self.func.as_str_name(), - inputs_type: self.inputs_type, - ret_type: self.ret_type, - set_returning: false, - deprecated: self.deprecated, - } - .fmt(f) - } -} - -/// Register a function into global registry. -/// -/// # Safety -/// -/// This function must be called sequentially. -/// -/// It is designed to be used by `#[function]` macro. -/// Users SHOULD NOT call this function. -#[doc(hidden)] -pub unsafe fn _register(desc: FuncSign) { - FUNC_SIG_MAP_INIT.push(desc) -} - -/// The global registry of function signatures on initialization. -/// -/// `#[function]` macro will generate a `#[ctor]` function to register the signature into this -/// vector. 
The calls are guaranteed to be sequential. The vector will be drained and moved into -/// `FUNC_SIG_MAP` on the first access of `FUNC_SIG_MAP`. -static mut FUNC_SIG_MAP_INIT: Vec = Vec::new(); - -#[cfg(test)] -mod tests { - use std::collections::BTreeMap; - - use itertools::Itertools; - - use super::*; - - #[test] - fn test_func_sig_map() { - // convert FUNC_SIG_MAP to a more convenient map for testing - let mut new_map: BTreeMap, Vec>> = - BTreeMap::new(); - for ((func, num_args), sigs) in &FUNC_SIG_MAP.0 { - for sig in sigs { - // validate the FUNC_SIG_MAP is consistent - assert_eq!(func, &sig.func); - assert_eq!(num_args, &sig.inputs_type.len()); - // exclude deprecated functions - if sig.deprecated { - continue; - } - - new_map - .entry(*func) - .or_default() - .entry(sig.inputs_type.to_vec()) - .or_default() - .push(sig.clone()); - } - } - - let duplicated: BTreeMap<_, Vec<_>> = new_map - .into_iter() - .filter_map(|(k, funcs_with_same_name)| { - let funcs_with_same_name_type: Vec<_> = funcs_with_same_name - .into_values() - .filter_map(|v| { - if v.len() > 1 { - Some( - format!( - "{:}({:?}) -> {:?}", - v[0].func.as_str_name(), - v[0].inputs_type.iter().format(", "), - v.iter().map(|sig| sig.ret_type).format("/") - ) - .to_ascii_lowercase(), - ) - } else { - None - } - }) - .collect(); - if !funcs_with_same_name_type.is_empty() { - Some((k, funcs_with_same_name_type)) - } else { - None - } - }) - .collect(); - - // This snapshot shows the function signatures without a unique match. Frontend has to - // handle them specially without relying on FuncSigMap. 
- let expected = expect_test::expect![[r#" - { - Cast: [ - "cast(boolean) -> int32/varchar", - "cast(int16) -> int256/decimal/float64/float32/int64/int32/varchar", - "cast(int32) -> int256/int16/decimal/float64/float32/int64/boolean/varchar", - "cast(int64) -> int256/int32/int16/decimal/float64/float32/varchar", - "cast(float32) -> decimal/int64/int32/int16/float64/varchar", - "cast(float64) -> decimal/float32/int64/int32/int16/varchar", - "cast(decimal) -> float64/float32/int64/int32/int16/varchar", - "cast(date) -> timestamp/varchar", - "cast(varchar) -> date/time/timestamp/jsonb/interval/int256/float32/float64/decimal/int16/int32/int64/varchar/boolean/bytea/list", - "cast(time) -> interval/varchar", - "cast(timestamp) -> date/time/varchar", - "cast(interval) -> time/varchar", - "cast(list) -> varchar/list", - "cast(jsonb) -> boolean/float64/float32/decimal/int64/int32/int16/varchar", - "cast(int256) -> float64/varchar", - ], - ArrayAccess: [ - "array_access(list, int32) -> boolean/int16/int32/int64/int256/float32/float64/decimal/serial/date/time/timestamp/timestamptz/interval/varchar/bytea/jsonb/list/struct", - ], - ArrayMin: [ - "array_min(list) -> bytea/varchar/timestamptz/timestamp/time/date/int256/serial/decimal/float32/float64/int16/int32/int64", - ], - ArrayMax: [ - "array_max(list) -> bytea/varchar/timestamptz/timestamp/time/date/int256/serial/decimal/float32/float64/int16/int32/int64", - ], - } - "#]]; - expected.assert_debug_eq(&duplicated); - } -} diff --git a/src/expr/src/sig/mod.rs b/src/expr/src/sig/mod.rs deleted file mode 100644 index cea417a3ca4ee..0000000000000 --- a/src/expr/src/sig/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Metadata of expressions. - -use itertools::Itertools; -use risingwave_common::types::DataTypeName; - -pub mod agg; -pub mod cast; -pub mod func; -pub mod table_function; - -/// Utility struct for debug printing of function signature. -pub(crate) struct FuncSigDebug<'a, T> { - pub func: T, - pub inputs_type: &'a [DataTypeName], - pub ret_type: DataTypeName, - pub set_returning: bool, - pub deprecated: bool, -} - -impl<'a, T: std::fmt::Display> std::fmt::Debug for FuncSigDebug<'a, T> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = format!( - "{}({:?}) -> {}{:?}{}", - self.func, - self.inputs_type.iter().format(", "), - if self.set_returning { "setof " } else { "" }, - self.ret_type, - if self.deprecated { " [deprecated]" } else { "" }, - ) - .to_ascii_lowercase(); - - f.write_str(&s) - } -} diff --git a/src/expr/src/sig/table_function.rs b/src/expr/src/sig/table_function.rs deleted file mode 100644 index a8ebce5e378bd..0000000000000 --- a/src/expr/src/sig/table_function.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Function signatures. - -use std::collections::HashMap; -use std::fmt; -use std::ops::Deref; -use std::sync::LazyLock; - -use risingwave_common::types::{DataType, DataTypeName}; -use risingwave_pb::expr::table_function::PbType; - -use super::FuncSigDebug; -use crate::error::Result; -use crate::expr::BoxedExpression; -use crate::table_function::BoxedTableFunction; - -pub static FUNC_SIG_MAP: LazyLock = LazyLock::new(|| unsafe { - let mut map = FuncSigMap::default(); - tracing::info!( - "{} table function signatures loaded.", - FUNC_SIG_MAP_INIT.len() - ); - for desc in FUNC_SIG_MAP_INIT.drain(..) { - map.insert(desc); - } - map -}); - -/// The table of function signatures. -pub fn func_sigs() -> impl Iterator { - FUNC_SIG_MAP.0.values().flatten() -} - -#[derive(Default, Clone, Debug)] -pub struct FuncSigMap(HashMap<(PbType, usize), Vec>); - -impl FuncSigMap { - /// Inserts a function signature. - pub fn insert(&mut self, desc: FuncSign) { - self.0 - .entry((desc.func, desc.inputs_type.len())) - .or_default() - .push(desc) - } - - /// Returns a function signature with the same type and argument types. - pub fn get(&self, ty: PbType, args: &[DataTypeName]) -> Option<&FuncSign> { - let v = self.0.get(&(ty, args.len()))?; - v.iter().find(|d| d.inputs_type == args) - } - - /// Returns all function signatures with the same type and number of arguments. - pub fn get_with_arg_nums(&self, ty: PbType, nargs: usize) -> &[FuncSign] { - self.0.get(&(ty, nargs)).map_or(&[], Deref::deref) - } -} - -/// A function signature. -#[derive(Clone)] -pub struct FuncSign { - pub func: PbType, - pub inputs_type: &'static [DataTypeName], - pub ret_type: DataTypeName, - pub build: fn( - return_type: DataType, - chunk_size: usize, - children: Vec, - ) -> Result, - /// A function to infer the return type from argument types. 
- pub type_infer: fn(args: &[DataType]) -> Result, -} - -impl fmt::Debug for FuncSign { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - FuncSigDebug { - func: self.func.as_str_name(), - inputs_type: self.inputs_type, - ret_type: self.ret_type, - set_returning: true, - deprecated: false, - } - .fmt(f) - } -} - -/// Register a function into global registry. -/// -/// # Safety -/// -/// This function must be called sequentially. -/// -/// It is designed to be used by `#[table_function]` macro. -/// Users SHOULD NOT call this function. -#[doc(hidden)] -pub unsafe fn _register(desc: FuncSign) { - FUNC_SIG_MAP_INIT.push(desc) -} - -/// The global registry of function signatures on initialization. -/// -/// `#[table_function]` macro will generate a `#[ctor]` function to register the signature into this -/// vector. The calls are guaranteed to be sequential. The vector will be drained and moved into -/// `FUNC_SIG_MAP` on the first access of `FUNC_SIG_MAP`. -static mut FUNC_SIG_MAP_INIT: Vec = Vec::new(); diff --git a/src/expr/src/vector_op/array_min_max.rs b/src/expr/src/vector_op/array_min_max.rs deleted file mode 100644 index 1ff1a4086f2cd..0000000000000 --- a/src/expr/src/vector_op/array_min_max.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use risingwave_common::array::*; -use risingwave_common::types::{DefaultOrdered, Scalar, ToOwnedDatum}; -use risingwave_expr_macro::function; - -use crate::Result; - -/// FIXME: #[`function("array_min(list`) -> any")] supports -/// In this way we could avoid manual macro expansion -#[function("array_min(list) -> *int")] -#[function("array_min(list) -> *float")] -#[function("array_min(list) -> decimal")] -#[function("array_min(list) -> serial")] -#[function("array_min(list) -> int256")] -#[function("array_min(list) -> date")] -#[function("array_min(list) -> time")] -#[function("array_min(list) -> timestamp")] -#[function("array_min(list) -> timestamptz")] -#[function("array_min(list) -> varchar")] -#[function("array_min(list) -> bytea")] -pub fn array_min(list: ListRef<'_>) -> Result> { - let min_value = list.iter().flatten().map(DefaultOrdered).min(); - match min_value.map(|v| v.0).to_owned_datum() { - Some(s) => Ok(Some(s.try_into()?)), - None => Ok(None), - } -} - -#[function("array_max(list) -> *int")] -#[function("array_max(list) -> *float")] -#[function("array_max(list) -> decimal")] -#[function("array_max(list) -> serial")] -#[function("array_max(list) -> int256")] -#[function("array_max(list) -> date")] -#[function("array_max(list) -> time")] -#[function("array_max(list) -> timestamp")] -#[function("array_max(list) -> timestamptz")] -#[function("array_max(list) -> varchar")] -#[function("array_max(list) -> bytea")] -pub fn array_max(list: ListRef<'_>) -> Result> { - let max_value = list.iter().flatten().map(DefaultOrdered).max(); - match max_value.map(|v| v.0).to_owned_datum() { - Some(s) => Ok(Some(s.try_into()?)), - None => Ok(None), - } -} diff --git a/src/expr/src/vector_op/cast.rs b/src/expr/src/vector_op/cast.rs deleted file mode 100644 index f17754396022a..0000000000000 --- a/src/expr/src/vector_op/cast.rs +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::fmt::Write; -use std::str::FromStr; - -use futures_util::FutureExt; -use itertools::Itertools; -use risingwave_common::array::{ - ListArray, ListRef, ListValue, StructArray, StructRef, StructValue, Utf8Array, -}; -use risingwave_common::cast::{ - parse_naive_date, parse_naive_datetime, parse_naive_time, str_to_bytea as str_to_bytea_common, -}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{ - DataType, Date, Decimal, Int256, Interval, IntoOrdered, JsonbRef, ScalarImpl, StructType, Time, - Timestamp, Timestamptz, ToText, F32, F64, -}; -use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr_macro::{build_function, function}; -use risingwave_pb::expr::expr_node::PbType; - -use crate::expr::template::UnaryExpression; -use crate::expr::{build_func, BoxedExpression, Expression, InputRefExpression}; -use crate::{ExprError, Result}; - -/// String literals for bool type. -/// -/// See [`https://www.postgresql.org/docs/9.5/datatype-boolean.html`] -const TRUE_BOOL_LITERALS: [&str; 9] = ["true", "tru", "tr", "t", "on", "1", "yes", "ye", "y"]; -const FALSE_BOOL_LITERALS: [&str; 10] = [ - "false", "fals", "fal", "fa", "f", "off", "of", "0", "no", "n", -]; - -#[function("cast(varchar) -> date")] -pub fn str_to_date(elem: &str) -> Result { - Ok(Date::new( - parse_naive_date(elem).map_err(|err| ExprError::Parse(err.into()))?, - )) -} - -#[function("cast(varchar) -> time")] -pub fn str_to_time(elem: &str) -> Result

) + Invalid input syntax: Invalid aggregation: avg(struct
) create_source: format: plain encode: protobuf @@ -346,7 +346,7 @@ - sql: | create materialized view t as select * from s; select (country).address.* from t; - binder_error: 'Bind error: type "varchar" is not composite' + binder_error: 'Bind error: type "character varying" is not composite' create_source: format: plain encode: protobuf diff --git a/src/frontend/planner_test/tests/testdata/output/subquery.yaml b/src/frontend/planner_test/tests/testdata/output/subquery.yaml index 21c3cfb03847a..e07e84e040929 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery.yaml @@ -346,21 +346,22 @@ └─BatchScan { table: auction, columns: [auction.date_time], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [date_time, window_start, window_end, auction._row_id(hidden)], stream_key: [auction._row_id, window_start, window_end, date_time], pk_columns: [auction._row_id, window_start, window_end, date_time], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: auction.date_time IS NOT DISTINCT FROM auction.date_time, output: all } - ├─StreamExchange { dist: HashShard(auction.date_time) } - │ └─StreamShare { id: 3 } - │ └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } - │ └─StreamFilter { predicate: IsNotNull(auction.date_time) } - │ └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } - └─StreamProject { exprs: [auction.date_time] } - └─StreamHashAgg { group_key: [auction.date_time], aggs: [count] } - └─StreamProject { exprs: [auction.date_time] } - └─StreamHashAgg { group_key: [auction.date_time], aggs: [count] } - └─StreamExchange { dist: HashShard(auction.date_time) } - └─StreamShare { id: 3 } - └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, 
size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } - └─StreamFilter { predicate: IsNotNull(auction.date_time) } - └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } + └─StreamExchange { dist: HashShard(auction.date_time, window_start, window_end, auction._row_id) } + └─StreamHashJoin { type: LeftSemi, predicate: auction.date_time IS NOT DISTINCT FROM auction.date_time, output: all } + ├─StreamExchange { dist: HashShard(auction.date_time) } + │ └─StreamShare { id: 3 } + │ └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } + │ └─StreamFilter { predicate: IsNotNull(auction.date_time) } + │ └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } + └─StreamProject { exprs: [auction.date_time] } + └─StreamHashAgg { group_key: [auction.date_time], aggs: [count] } + └─StreamProject { exprs: [auction.date_time] } + └─StreamHashAgg { group_key: [auction.date_time], aggs: [count] } + └─StreamExchange { dist: HashShard(auction.date_time) } + └─StreamShare { id: 3 } + └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } + └─StreamFilter { predicate: IsNotNull(auction.date_time) } + └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } - sql: | CREATE TABLE t (v int); SELECT 1 FROM t AS t_inner WHERE EXISTS ( SELECT 1 HAVING t_inner.v > 1); @@ -534,23 +535,24 @@ └─BatchExchange { order: [], dist: HashShard(t.x) } └─BatchScan { table: t, columns: [t.x], distribution: SomeShard } stream_plan: |- - StreamMaterialize { columns: [x, y, k, sum_x, t.x(hidden)], 
stream_key: [k, t.x, x], pk_columns: [k, t.x, x], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, t.y, t.k, sum(Unnest($0)), t.x] } - ├─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } - └─StreamProject { exprs: [t.x, sum(Unnest($0))] } - └─StreamHashAgg { group_key: [t.x], aggs: [sum(Unnest($0)), count] } - └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, Unnest($0), t.x, projected_row_id] } - ├─StreamProject { exprs: [t.x] } - │ └─StreamHashAgg { group_key: [t.x], aggs: [count] } - │ └─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } - └─StreamProject { exprs: [t.x, Unnest($0), projected_row_id] } - └─StreamProjectSet { select_list: [$0, Unnest($0)] } - └─StreamProject { exprs: [t.x] } - └─StreamHashAgg { group_key: [t.x], aggs: [count] } - └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } + StreamMaterialize { columns: [x, y, k, sum_x, t.x(hidden)], stream_key: [k, x], pk_columns: [k, x], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(t.x, t.k) } + └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, t.y, t.k, sum(Unnest($0)), t.x] } + ├─StreamExchange { dist: HashShard(t.x) } + │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } + └─StreamProject { exprs: [t.x, sum(Unnest($0))] } + └─StreamHashAgg { group_key: [t.x], aggs: [sum(Unnest($0)), count] } + └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, Unnest($0), t.x, projected_row_id] } + ├─StreamProject { exprs: [t.x] } + │ └─StreamHashAgg { group_key: [t.x], aggs: [count] } + │ └─StreamExchange { 
dist: HashShard(t.x) } + │ └─StreamTableScan { table: t, columns: [t.x, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } + └─StreamProject { exprs: [t.x, Unnest($0), projected_row_id] } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.x] } + └─StreamHashAgg { group_key: [t.x], aggs: [count] } + └─StreamExchange { dist: HashShard(t.x) } + └─StreamTableScan { table: t, columns: [t.x, t.k], pk: [t.k], dist: UpstreamHashShard(t.k) } - name: CorrelatedInputRef in ProjectSet and apply on condition is true. sql: | create table t(x int[], y int[], k int primary key); @@ -582,29 +584,29 @@ create table t(x int[], y int[], k int primary key); select *, (select sum(i) from (select unnest(x) i, 1 c) Q where k = c ) as sum_x from t; optimized_logical_plan_for_batch: |- - LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, t.x) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.y, t.k, sum(Unnest($0))] } + LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, internal_last_seen_value(t.x)) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.y, t.k, sum(Unnest($0))] } ├─LogicalScan { table: t, columns: [t.x, t.y, t.k] } - └─LogicalAgg { group_key: [t.x, t.k], aggs: [sum(Unnest($0))] } - └─LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, t.x) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.k, Unnest($0)] } - ├─LogicalAgg { group_key: [t.x, t.k], aggs: [] } + └─LogicalAgg { group_key: [internal_last_seen_value(t.x), t.k], aggs: [sum(Unnest($0))] } + └─LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(internal_last_seen_value(t.x), internal_last_seen_value(t.x)) AND IsNotDistinctFrom(t.k, t.k), output: [internal_last_seen_value(t.x), t.k, Unnest($0)] } + ├─LogicalAgg { group_key: [t.k], aggs: [internal_last_seen_value(t.x)] } │ └─LogicalScan { table: t, columns: [t.x, t.k] } - └─LogicalProject { exprs: [t.x, t.k, Unnest($0)] } + └─LogicalProject { exprs: [internal_last_seen_value(t.x), t.k, Unnest($0)] } └─LogicalProjectSet { 
select_list: [$0, $1, Unnest($0)] } - └─LogicalJoin { type: Inner, on: true, output: all } - ├─LogicalAgg { group_key: [t.x, t.k], aggs: [] } + └─LogicalJoin { type: Inner, on: true, output: [internal_last_seen_value(t.x), t.k] } + ├─LogicalAgg { group_key: [t.k], aggs: [internal_last_seen_value(t.x)] } │ └─LogicalScan { table: t, columns: [t.x, t.k], predicate: (t.k = 1:Int32) } └─LogicalValues { rows: [[]], schema: Schema { fields: [] } } optimized_logical_plan_for_stream: |- - LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, t.x) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.y, t.k, sum(Unnest($0))] } + LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, internal_last_seen_value(t.x)) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.y, t.k, sum(Unnest($0))] } ├─LogicalScan { table: t, columns: [t.x, t.y, t.k] } - └─LogicalAgg { group_key: [t.x, t.k], aggs: [sum(Unnest($0))] } - └─LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(t.x, t.x) AND IsNotDistinctFrom(t.k, t.k), output: [t.x, t.k, Unnest($0)] } - ├─LogicalAgg { group_key: [t.x, t.k], aggs: [] } + └─LogicalAgg { group_key: [internal_last_seen_value(t.x), t.k], aggs: [sum(Unnest($0))] } + └─LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(internal_last_seen_value(t.x), internal_last_seen_value(t.x)) AND IsNotDistinctFrom(t.k, t.k), output: [internal_last_seen_value(t.x), t.k, Unnest($0)] } + ├─LogicalAgg { group_key: [t.k], aggs: [internal_last_seen_value(t.x)] } │ └─LogicalScan { table: t, columns: [t.x, t.k] } - └─LogicalProject { exprs: [t.x, t.k, Unnest($0)] } + └─LogicalProject { exprs: [internal_last_seen_value(t.x), t.k, Unnest($0)] } └─LogicalProjectSet { select_list: [$0, $1, Unnest($0)] } - └─LogicalJoin { type: Inner, on: true, output: all } - ├─LogicalAgg { group_key: [t.x, t.k], aggs: [] } + └─LogicalJoin { type: Inner, on: true, output: [internal_last_seen_value(t.x), t.k] } + ├─LogicalAgg { group_key: [t.k], aggs: [internal_last_seen_value(t.x)] } │ └─LogicalScan { 
table: t, columns: [t.x, t.k], predicate: (t.k = 1:Int32) } └─LogicalValues { rows: [[]], schema: Schema { fields: [] } } - name: CorrelatedInputRef in ProjectSet and apply on condition refers to table function. @@ -631,17 +633,18 @@ └─BatchFilter { predicate: IsNotNull(integers.i) } └─BatchScan { table: integers, columns: [integers.i], distribution: SomeShard } stream_plan: |- - StreamMaterialize { columns: [i, col, integers._row_id(hidden), integers.i(hidden)], stream_key: [integers._row_id, integers.i, i], pk_columns: [i, integers._row_id, integers.i], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, row_number, integers._row_id, integers.i] } - ├─StreamExchange { dist: HashShard(integers.i) } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } - └─StreamProject { exprs: [integers.i, row_number, integers._row_id] } - └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY integers.i ORDER BY integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } - └─StreamExchange { dist: HashShard(integers.i) } - └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } - └─StreamFilter { predicate: IsNotNull(integers.i) } - └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + StreamMaterialize { columns: [i, col, integers._row_id(hidden), integers.i(hidden)], stream_key: [integers._row_id, i], pk_columns: [i, integers._row_id], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(integers.i, integers._row_id) } + └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, row_number, integers._row_id, 
integers.i] } + ├─StreamExchange { dist: HashShard(integers.i) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } + └─StreamProject { exprs: [integers.i, row_number, integers._row_id] } + └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY integers.i ORDER BY integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } + └─StreamExchange { dist: HashShard(integers.i) } + └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } + └─StreamFilter { predicate: IsNotNull(integers.i) } + └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - name: test over window subquery 2 (with nested loop join so cannot be transformed into a stream plan) sql: | CREATE TABLE integers(i INTEGER); @@ -689,17 +692,18 @@ └─BatchFilter { predicate: IsNotNull(integers.i) } └─BatchScan { table: integers, columns: [integers.i], distribution: SomeShard } stream_plan: |- - StreamMaterialize { columns: [i, col, integers._row_id(hidden), integers.i(hidden)], stream_key: [integers._row_id, integers.i, i], pk_columns: [i, integers._row_id, integers.i], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, sum, integers._row_id, integers.i] } - ├─StreamExchange { dist: HashShard(integers.i) } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } - └─StreamProject { exprs: [integers.i, sum, integers._row_id] } - └─StreamOverWindow { window_functions: [sum(integers.i) OVER(PARTITION BY integers.i ORDER BY 
integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } - └─StreamExchange { dist: HashShard(integers.i) } - └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } - └─StreamFilter { predicate: IsNotNull(integers.i) } - └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + StreamMaterialize { columns: [i, col, integers._row_id(hidden), integers.i(hidden)], stream_key: [integers._row_id, i], pk_columns: [i, integers._row_id], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(integers.i, integers._row_id) } + └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, sum, integers._row_id, integers.i] } + ├─StreamExchange { dist: HashShard(integers.i) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } + └─StreamProject { exprs: [integers.i, sum, integers._row_id] } + └─StreamOverWindow { window_functions: [sum(integers.i) OVER(PARTITION BY integers.i ORDER BY integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } + └─StreamExchange { dist: HashShard(integers.i) } + └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } + └─StreamFilter { predicate: IsNotNull(integers.i) } + └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - name: test over window subquery 4 (with nested loop join so cannot be transformed into a stream plan) sql: | CREATE TABLE integers(i INTEGER); @@ -747,17 +751,18 @@ └─BatchScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [i, integers._row_id(hidden), 
$expr1(hidden), integers.correlated_col(hidden)], stream_key: [integers._row_id, $expr1, integers.correlated_col], pk_columns: [integers._row_id, $expr1, integers.correlated_col], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: $expr1 = sum AND integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.i, integers._row_id, $expr1, integers.correlated_col] } - ├─StreamExchange { dist: HashShard(integers.correlated_col, $expr1) } - │ └─StreamProject { exprs: [integers.i, integers.correlated_col, integers.i::Int64 as $expr1, integers._row_id] } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - └─StreamExchange { dist: HashShard(rows.correlated_col, sum) } - └─StreamProject { exprs: [rows.correlated_col, sum, rows._row_id, rows.k] } - └─StreamOverWindow { window_functions: [sum(rows.v) OVER(PARTITION BY rows.correlated_col, rows.k ORDER BY rows.v ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } - └─StreamExchange { dist: HashShard(rows.correlated_col, rows.k) } - └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } - └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } - └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } + └─StreamExchange { dist: HashShard(integers._row_id, $expr1, integers.correlated_col) } + └─StreamHashJoin { type: LeftSemi, predicate: $expr1 = sum AND integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.i, integers._row_id, $expr1, integers.correlated_col] } + ├─StreamExchange { dist: HashShard(integers.correlated_col, $expr1) } + │ └─StreamProject { exprs: [integers.i, integers.correlated_col, integers.i::Int64 as $expr1, integers._row_id] } + │ └─StreamTableScan { table: integers, columns: 
[integers.i, integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamExchange { dist: HashShard(rows.correlated_col, sum) } + └─StreamProject { exprs: [rows.correlated_col, sum, rows._row_id, rows.k] } + └─StreamOverWindow { window_functions: [sum(rows.v) OVER(PARTITION BY rows.correlated_col, rows.k ORDER BY rows.v ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } + └─StreamExchange { dist: HashShard(rows.correlated_col, rows.k) } + └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } + └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } + └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } - name: test cardinality visitor with correlated filter sql: | CREATE TABLE t1(i INT); @@ -818,18 +823,44 @@ └─BatchScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [i, correlated_col, integers._row_id(hidden), 2:Int64(hidden)], stream_key: [integers._row_id, correlated_col, 2:Int64], pk_columns: [integers._row_id, correlated_col, 2:Int64], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: integers.correlated_col IS NOT DISTINCT FROM integers.correlated_col AND 2:Int64 = $expr1, output: [integers.i, integers.correlated_col, integers._row_id, 2:Int64] } - ├─StreamExchange { dist: HashShard(integers.correlated_col) } - │ └─StreamProject { exprs: [integers.i, integers.correlated_col, 2:Int64, integers._row_id] } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - └─StreamProject { exprs: [integers.correlated_col, (count(distinct rows.k) + count(distinct rows.v)) as $expr1] } - └─StreamHashAgg { group_key: [integers.correlated_col], aggs: 
[count(distinct rows.k), count(distinct rows.v), count] } - └─StreamHashJoin { type: LeftOuter, predicate: integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.correlated_col, rows.k, rows.v, rows._row_id] } - ├─StreamProject { exprs: [integers.correlated_col] } - │ └─StreamHashAgg { group_key: [integers.correlated_col], aggs: [count] } - │ └─StreamExchange { dist: HashShard(integers.correlated_col) } - │ └─StreamTableScan { table: integers, columns: [integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - └─StreamExchange { dist: HashShard(rows.correlated_col) } - └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } - └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } - └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } + └─StreamExchange { dist: HashShard(integers.correlated_col, integers._row_id, 2:Int64) } + └─StreamHashJoin { type: LeftSemi, predicate: integers.correlated_col IS NOT DISTINCT FROM integers.correlated_col AND 2:Int64 = $expr1, output: [integers.i, integers.correlated_col, integers._row_id, 2:Int64] } + ├─StreamExchange { dist: HashShard(integers.correlated_col) } + │ └─StreamProject { exprs: [integers.i, integers.correlated_col, 2:Int64, integers._row_id] } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamProject { exprs: [integers.correlated_col, (count(distinct rows.k) + count(distinct rows.v)) as $expr1] } + └─StreamHashAgg { group_key: [integers.correlated_col], aggs: [count(distinct rows.k), count(distinct rows.v), count] } + └─StreamHashJoin { type: LeftOuter, predicate: integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.correlated_col, rows.k, 
rows.v, rows._row_id] } + ├─StreamProject { exprs: [integers.correlated_col] } + │ └─StreamHashAgg { group_key: [integers.correlated_col], aggs: [count] } + │ └─StreamExchange { dist: HashShard(integers.correlated_col) } + │ └─StreamTableScan { table: integers, columns: [integers.correlated_col, integers._row_id], pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamExchange { dist: HashShard(rows.correlated_col) } + └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } + └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } + └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } +- name: test hop window subquery 1 + sql: | + create table t1 (k int primary key, ts timestamp); + select * from (select 1 as col union select 2) u , lateral(select * from hop(t1, ts, interval '10' minute, interval '30' minute) where col = k); + batch_plan: |- + BatchHopWindow { time_col: t1.ts, slide: 00:10:00, size: 00:30:00, output: all } + └─BatchExchange { order: [], dist: Single } + └─BatchFilter { predicate: IsNotNull(t1.ts) } + └─BatchLookupJoin { type: Inner, predicate: 1:Int32 = t1.k AND IsNotNull(t1.ts), output: all } + └─BatchExchange { order: [], dist: UpstreamHashShard(1:Int32) } + └─BatchHashAgg { group_key: [1:Int32], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(1:Int32) } + └─BatchValues { rows: [[1:Int32], [2:Int32]] } + stream_plan: |- + StreamMaterialize { columns: [col, k, ts, window_start, window_end], stream_key: [col, window_start, window_end], pk_columns: [col, window_start, window_end], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(1:Int32, window_start, window_end) } + └─StreamHashJoin { type: Inner, predicate: 1:Int32 = t1.k, output: all } + ├─StreamAppendOnlyDedup { dedup_cols: [1:Int32] } + │ └─StreamExchange { dist: HashShard(1:Int32) } + │ └─StreamProject { exprs: [1:Int32] } 
+ │ └─StreamValues { rows: [[1:Int32, 0:Int64], [2:Int32, 1:Int64]] } + └─StreamExchange { dist: HashShard(t1.k) } + └─StreamHopWindow { time_col: t1.ts, slide: 00:10:00, size: 00:30:00, output: all } + └─StreamFilter { predicate: IsNotNull(t1.ts) } + └─StreamTableScan { table: t1, columns: [t1.k, t1.ts], pk: [t1.k], dist: UpstreamHashShard(t1.k) } diff --git a/src/frontend/planner_test/tests/testdata/output/subquery_expr.yaml b/src/frontend/planner_test/tests/testdata/output/subquery_expr.yaml index bc1da0f48dfb2..1383c156a18f5 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery_expr.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery_expr.yaml @@ -234,3 +234,41 @@ ├─LogicalScan { table: t1, columns: [t1.x, t1.y, t1._row_id] } └─LogicalProject { exprs: [t2.y] } └─LogicalScan { table: t2, columns: [t2.x, t2.y, t2._row_id] } +- sql: | + create table t1 (a int); + create table t2 (b int); + SELECT * + FROM t1 + WHERE EXISTS + (SELECT 1 + FROM t2 + GROUP BY a + ORDER BY a DESC LIMIT 90); + logical_plan: |- + LogicalProject { exprs: [t1.a] } + └─LogicalApply { type: LeftSemi, on: true, correlated_id: 1 } + ├─LogicalScan { table: t1, columns: [t1.a, t1._row_id] } + └─LogicalProject { exprs: [1:Int32] } + └─LogicalTopN { order: [$expr2 DESC], limit: 90, offset: 0 } + └─LogicalProject { exprs: [1:Int32, CorrelatedInputRef { index: 0, correlated_id: 1 } as $expr2] } + └─LogicalAgg { group_key: [$expr1], aggs: [] } + └─LogicalProject { exprs: [CorrelatedInputRef { index: 0, correlated_id: 1 } as $expr1] } + └─LogicalScan { table: t2, columns: [t2.b, t2._row_id] } + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchHashJoin { type: LeftSemi, predicate: t1.a IS NOT DISTINCT FROM t1.a, output: all } + ├─BatchExchange { order: [], dist: HashShard(t1.a) } + │ └─BatchScan { table: t1, columns: [t1.a], distribution: SomeShard } + └─BatchProject { exprs: [t1.a] } + └─BatchGroupTopN { order: [t1.a DESC], limit: 90, offset: 0, 
group_key: [t1.a] } + └─BatchExchange { order: [], dist: HashShard(t1.a) } + └─BatchProject { exprs: [t1.a, t1.a] } + └─BatchHashAgg { group_key: [t1.a], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t1.a) } + └─BatchNestedLoopJoin { type: Inner, predicate: true, output: all } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchHashAgg { group_key: [t1.a], aggs: [] } + │ └─BatchExchange { order: [], dist: HashShard(t1.a) } + │ └─BatchScan { table: t1, columns: [t1.a], distribution: SomeShard } + └─BatchExchange { order: [], dist: Single } + └─BatchScan { table: t2, columns: [], distribution: SomeShard } diff --git a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml index 797bdda0f5bf0..0d393c378ff85 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml @@ -466,14 +466,14 @@ └─LogicalScan { table: c, columns: [c.c1, c.c2, c.c3, c._row_id] } optimized_logical_plan_for_batch: |- LogicalAgg { aggs: [count] } - └─LogicalJoin { type: Inner, on: IsNotDistinctFrom(a.a3, a.a3) AND IsNotDistinctFrom(b.b2, b.b2), output: [] } + └─LogicalJoin { type: Inner, on: IsNotDistinctFrom(a.a3, internal_last_seen_value(a.a3)) AND IsNotDistinctFrom(b.b2, b.b2), output: [] } ├─LogicalJoin { type: Inner, on: (a.a3 = b.b2), output: all } │ ├─LogicalScan { table: a, columns: [a.a3] } │ └─LogicalScan { table: b, columns: [b.b2] } └─LogicalFilter { predicate: (3:Int32 = count(1:Int32)) } - └─LogicalAgg { group_key: [a.a3, b.b2], aggs: [count(1:Int32)] } - └─LogicalJoin { type: LeftOuter, on: IsNotDistinctFrom(a.a3, c.c3) AND IsNotDistinctFrom(b.b2, c.c2), output: [a.a3, b.b2, 1:Int32] } - ├─LogicalAgg { group_key: [a.a3, b.b2], aggs: [] } + └─LogicalAgg { group_key: [internal_last_seen_value(a.a3), b.b2], aggs: [count(1:Int32)] } + └─LogicalJoin { 
type: LeftOuter, on: IsNotDistinctFrom(internal_last_seen_value(a.a3), c.c3) AND IsNotDistinctFrom(b.b2, c.c2), output: [internal_last_seen_value(a.a3), b.b2, 1:Int32] } + ├─LogicalAgg { group_key: [b.b2], aggs: [internal_last_seen_value(a.a3)] } │ └─LogicalJoin { type: Inner, on: (a.a3 = b.b2), output: all } │ ├─LogicalScan { table: a, columns: [a.a3] } │ └─LogicalScan { table: b, columns: [b.b2] } @@ -717,15 +717,16 @@ └─BatchScan { table: t2, columns: [t2.x], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [x, y, t1._row_id(hidden)], stream_key: [t1._row_id, x], pk_columns: [t1._row_id, x], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: t1.x IS NOT DISTINCT FROM t2.x, output: all } - ├─StreamExchange { dist: HashShard(t1.x) } - │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamProject { exprs: [t2.x] } - └─StreamGroupTopN { order: [t2.x ASC], limit: 1, offset: 0, group_key: [t2.x] } - └─StreamExchange { dist: HashShard(t2.x) } - └─StreamProject { exprs: [t2.x, t2.x, t2._row_id] } - └─StreamFilter { predicate: IsNotNull(t2.x) } - └─StreamTableScan { table: t2, columns: [t2.x, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.x, t1._row_id) } + └─StreamHashJoin { type: LeftSemi, predicate: t1.x IS NOT DISTINCT FROM t2.x, output: all } + ├─StreamExchange { dist: HashShard(t1.x) } + │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamProject { exprs: [t2.x] } + └─StreamGroupTopN { order: [t2.x ASC], limit: 1, offset: 0, group_key: [t2.x] } + └─StreamExchange { dist: HashShard(t2.x) } + └─StreamProject { exprs: [t2.x, t2.x, t2._row_id] } + └─StreamFilter { predicate: IsNotNull(t2.x) } + └─StreamTableScan { table: t2, columns: [t2.x, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } 
- sql: | create table t1(x int, y int); create table t2(x int, y int); @@ -883,14 +884,15 @@ └─BatchScan { table: t2, columns: [t2.v2, t2.k2], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, k1, t1._row_id(hidden)], stream_key: [t1._row_id, v1, k1], pk_columns: [t1._row_id, v1, k1], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, output: all } - ├─StreamExchange { dist: HashShard(t1.k1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } - └─StreamExchange { dist: HashShard(t2.k2) } - └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } - └─StreamFilter { predicate: IsNotNull(t2.k2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.v1, t1.k1, t1._row_id) } + └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, output: all } + ├─StreamExchange { dist: HashShard(t1.k1) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } + └─StreamExchange { dist: HashShard(t2.k2) } + └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } + └─StreamFilter { predicate: IsNotNull(t2.k2) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: test ApplyTopNTransposeRule case 2 sql: | create table t1 (v1 int, k1 int); @@ -908,16 +910,17 @@ └─BatchScan { table: t2, columns: [t2.v2], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, k1, t1._row_id(hidden)], stream_key: [t1._row_id, v1], pk_columns: 
[t1._row_id, v1], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2, output: all } - ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.v2) } - └─StreamProject { exprs: [t2.v2, t2._row_id] } - └─StreamTopN { order: [t2.v2 ASC], limit: 1, offset: 0 } - └─StreamExchange { dist: Single } - └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [$expr1] } - └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.v1, t1._row_id) } + └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2, output: all } + ├─StreamExchange { dist: HashShard(t1.v1) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.v2) } + └─StreamProject { exprs: [t2.v2, t2._row_id] } + └─StreamTopN { order: [t2.v2 ASC], limit: 1, offset: 0 } + └─StreamExchange { dist: Single } + └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [$expr1] } + └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: test ApplyLimitTransposeRule case 1 sql: | create table t1 (v1 int, k1 int); @@ -935,11 +938,12 @@ └─BatchScan { table: t2, columns: [t2.v2, t2.k2], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, k1, t1._row_id(hidden)], stream_key: [t1._row_id, v1, k1], pk_columns: [t1._row_id, v1, k1], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, 
output: all } - ├─StreamExchange { dist: HashShard(t1.k1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamGroupTopN { order: [t2.k2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } - └─StreamExchange { dist: HashShard(t2.k2) } - └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } - └─StreamFilter { predicate: IsNotNull(t2.k2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.v1, t1.k1, t1._row_id) } + └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, output: all } + ├─StreamExchange { dist: HashShard(t1.k1) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamGroupTopN { order: [t2.k2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } + └─StreamExchange { dist: HashShard(t2.k2) } + └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } + └─StreamFilter { predicate: IsNotNull(t2.k2) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml b/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml index a2bdc69c97999..29e391853cf8a 100644 --- a/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml +++ b/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml @@ -97,9 +97,9 @@ Table 3 { columns: [ $expr1 ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } Table 4 - ├── columns: [ vnode, _row_id, t1_backfill_finished ] + ├── columns: [ vnode, _row_id, t1_backfill_finished, t1_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] 
├── read pk prefix len hint: 1 └── vnode column idx: 0 @@ -122,19 +122,20 @@ select * from t1 join t2 on a = b AND ta < now() - interval '1 hour' and ta >= now() - interval '2 hour'; stream_plan: |- StreamMaterialize { columns: [a, ta, b, tb, t1._row_id(hidden), t2._row_id(hidden)], stream_key: [t1._row_id, t2._row_id, a], pk_columns: [t1._row_id, t2._row_id, a], pk_conflict: NoCheck } - └─StreamHashJoin { type: Inner, predicate: t1.a = t2.b, output: [t1.a, t1.ta, t2.b, t2.tb, t1._row_id, t2._row_id] } - ├─StreamExchange { dist: HashShard(t1.a) } - │ └─StreamDynamicFilter { predicate: (t1.ta < $expr2), output: [t1.a, t1.ta, t1._row_id] } - │ ├─StreamDynamicFilter { predicate: (t1.ta >= $expr1), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } - │ │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - │ │ └─StreamExchange { dist: Broadcast } - │ │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } - │ │ └─StreamNow { output: [now] } - │ └─StreamExchange { dist: Broadcast } - │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } - │ └─StreamNow { output: [now] } - └─StreamExchange { dist: HashShard(t2.b) } - └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.a, t1._row_id, t2._row_id) } + └─StreamHashJoin { type: Inner, predicate: t1.a = t2.b, output: [t1.a, t1.ta, t2.b, t2.tb, t1._row_id, t2._row_id] } + ├─StreamExchange { dist: HashShard(t1.a) } + │ └─StreamDynamicFilter { predicate: (t1.ta < $expr2), output: [t1.a, t1.ta, t1._row_id] } + │ ├─StreamDynamicFilter { predicate: (t1.ta >= $expr1), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } + │ 
│ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamExchange { dist: Broadcast } + │ │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } + │ │ └─StreamNow { output: [now] } + │ └─StreamExchange { dist: Broadcast } + │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } + │ └─StreamNow { output: [now] } + └─StreamExchange { dist: HashShard(t2.b) } + └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Temporal filter in on clause for left join's left side sql: | create table t1 (a int, ta timestamp with time zone); @@ -150,19 +151,20 @@ select * from t1 right join t2 on a = b AND ta < now() - interval '1 hour' and ta >= now() - interval '2 hour'; stream_plan: |- StreamMaterialize { columns: [a, ta, b, tb, t2._row_id(hidden), t1._row_id(hidden)], stream_key: [t2._row_id, t1._row_id, b], pk_columns: [t2._row_id, t1._row_id, b], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftOuter, predicate: t2.b = t1.a, output: [t1.a, t1.ta, t2.b, t2.tb, t2._row_id, t1._row_id] } - ├─StreamExchange { dist: HashShard(t2.b) } - │ └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - └─StreamExchange { dist: HashShard(t1.a) } - └─StreamDynamicFilter { predicate: (t1.ta < $expr2), output: [t1.a, t1.ta, t1._row_id] } - ├─StreamDynamicFilter { predicate: (t1.ta >= $expr1), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } - │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - │ └─StreamExchange { dist: Broadcast } - │ └─StreamProject { exprs: [SubtractWithTimeZone(now, 
'02:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } - │ └─StreamNow { output: [now] } - └─StreamExchange { dist: Broadcast } - └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } - └─StreamNow { output: [now] } + └─StreamExchange { dist: HashShard(t2.b, t2._row_id, t1._row_id) } + └─StreamHashJoin { type: LeftOuter, predicate: t2.b = t1.a, output: [t1.a, t1.ta, t2.b, t2.tb, t2._row_id, t1._row_id] } + ├─StreamExchange { dist: HashShard(t2.b) } + │ └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.a) } + └─StreamDynamicFilter { predicate: (t1.ta < $expr2), output: [t1.a, t1.ta, t1._row_id] } + ├─StreamDynamicFilter { predicate: (t1.ta >= $expr1), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } + │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamExchange { dist: Broadcast } + │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } + │ └─StreamNow { output: [now] } + └─StreamExchange { dist: Broadcast } + └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } + └─StreamNow { output: [now] } - name: Temporal filter in on clause for full join's left side sql: | create table t1 (a int, ta timestamp with time zone); @@ -178,19 +180,20 @@ select * from t1 left join t2 on a = b AND tb < now() - interval '1 hour' and tb >= now() - interval '2 hour'; stream_plan: |- StreamMaterialize { columns: [a, ta, b, tb, t1._row_id(hidden), t2._row_id(hidden)], stream_key: [t1._row_id, t2._row_id, a], pk_columns: [t1._row_id, t2._row_id, a], pk_conflict: NoCheck } - └─StreamHashJoin { type: 
LeftOuter, predicate: t1.a = t2.b, output: [t1.a, t1.ta, t2.b, t2.tb, t1._row_id, t2._row_id] } - ├─StreamExchange { dist: HashShard(t1.a) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.b) } - └─StreamDynamicFilter { predicate: (t2.tb < $expr2), output: [t2.b, t2.tb, t2._row_id] } - ├─StreamDynamicFilter { predicate: (t2.tb >= $expr1), output_watermarks: [t2.tb], output: [t2.b, t2.tb, t2._row_id], cleaned_by_watermark: true } - │ ├─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - │ └─StreamExchange { dist: Broadcast } - │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } - │ └─StreamNow { output: [now] } - └─StreamExchange { dist: Broadcast } - └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } - └─StreamNow { output: [now] } + └─StreamExchange { dist: HashShard(t1.a, t1._row_id, t2._row_id) } + └─StreamHashJoin { type: LeftOuter, predicate: t1.a = t2.b, output: [t1.a, t1.ta, t2.b, t2.tb, t1._row_id, t2._row_id] } + ├─StreamExchange { dist: HashShard(t1.a) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.b) } + └─StreamDynamicFilter { predicate: (t2.tb < $expr2), output: [t2.b, t2.tb, t2._row_id] } + ├─StreamDynamicFilter { predicate: (t2.tb >= $expr1), output_watermarks: [t2.tb], output: [t2.b, t2.tb, t2._row_id], cleaned_by_watermark: true } + │ ├─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamExchange { dist: Broadcast } + │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) 
as $expr1], output_watermarks: [$expr1] } + │ └─StreamNow { output: [now] } + └─StreamExchange { dist: Broadcast } + └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } + └─StreamNow { output: [now] } - name: Temporal filter in on clause for right join's right side sql: | create table t1 (a int, ta timestamp with time zone); diff --git a/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml b/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml index 88edfa45ad938..f49a82be2dd78 100644 --- a/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml +++ b/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml @@ -5,12 +5,13 @@ create table version(id2 int, a2 int, b2 int, primary key (id2)); select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on id1= id2 stream_plan: |- - StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id2, id1], pk_columns: [stream._row_id, id2, id1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2, output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } - ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } - └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } + StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id1], pk_columns: [stream._row_id, id1], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2, 
output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } + ├─StreamExchange { dist: HashShard(stream.id1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } + └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } batch_error: |- Not supported: do not support temporal join for batch queries HINT: please use temporal join in streaming queries @@ -20,36 +21,39 @@ create table version(id2 int, a2 int, b2 int, primary key (id2)); select id1, a1, id2, a2 from stream join version FOR SYSTEM_TIME AS OF PROCTIME() on id1 = id2 where a2 < 10; stream_plan: |- - StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id2, id1], pk_columns: [stream._row_id, id2, id1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } - ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } - └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } + StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id1], pk_columns: [stream._row_id, id1], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, 
stream._row_id] } + ├─StreamExchange { dist: HashShard(stream.id1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } + └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: implicit join with temporal tables sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; create table version(id2 int, a2 int, b2 int, primary key (id2)); select id1, a1, id2, a2 from stream, version FOR SYSTEM_TIME AS OF PROCTIME() where id1 = id2 AND a2 < 10; stream_plan: |- - StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id2, id1], pk_columns: [stream._row_id, id2, id1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } - ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } - └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } + StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id1], pk_columns: [stream._row_id, id1], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } + ├─StreamExchange { dist: HashShard(stream.id1) } + │ └─StreamTableScan { table: stream, 
columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } + └─StreamTableScan { table: version, columns: [version.id2, version.a2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: Multi join key for temporal join sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; create table version(id2 int, a2 int, b2 int, primary key (id2, a2)); select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and id1 = id2 where b2 != a2; stream_plan: |- - StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id2, a2, id1, a1], pk_columns: [stream._row_id, id2, a2, id1, a1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND stream.a1 = version.a2 AND (version.b2 <> version.a2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } - ├─StreamExchange { dist: HashShard(stream.id1, stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2, version.a2) } - └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2], pk: [version.id2, version.a2], dist: UpstreamHashShard(version.id2, version.a2) } + StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden)], stream_key: [stream._row_id, id1, a1], pk_columns: [stream._row_id, id1, a1], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream.a1, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND stream.a1 = version.a2 AND (version.b2 <> version.a2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } + ├─StreamExchange { dist: 
HashShard(stream.id1, stream.a1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2, version.a2) } + └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2], pk: [version.id2, version.a2], dist: UpstreamHashShard(version.id2, version.a2) } - name: Temporal join with Aggregation sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; @@ -100,16 +104,17 @@ join version1 FOR SYSTEM_TIME AS OF PROCTIME() on stream.k = version1.k join version2 FOR SYSTEM_TIME AS OF PROCTIME() on stream.k = version2.k where a1 < 10; stream_plan: |- - StreamMaterialize { columns: [k, x1, x2, a1, b1, stream._row_id(hidden), version1.k(hidden), version2.k(hidden)], stream_key: [stream._row_id, version1.k, k, version2.k], pk_columns: [stream._row_id, version1.k, k, version2.k], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.k = version2.k, output: [stream.k, version1.x1, version2.x2, stream.a1, stream.b1, stream._row_id, version1.k, version2.k] } - ├─StreamTemporalJoin { type: Inner, predicate: stream.k = version1.k, output: [stream.k, stream.a1, stream.b1, version1.x1, stream._row_id, version1.k] } - │ ├─StreamExchange { dist: HashShard(stream.k) } - │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.k, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.k) } - │ └─StreamTableScan { table: version1, columns: [version1.k, version1.x1], pk: [version1.k], dist: UpstreamHashShard(version1.k) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.k) } - └─StreamTableScan { table: version2, columns: [version2.k, version2.x2], pk: [version2.k], dist: 
UpstreamHashShard(version2.k) } + StreamMaterialize { columns: [k, x1, x2, a1, b1, stream._row_id(hidden), version2.k(hidden)], stream_key: [stream._row_id, k], pk_columns: [stream._row_id, k], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.k, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.k = version2.k, output: [stream.k, version1.x1, version2.x2, stream.a1, stream.b1, stream._row_id, version2.k] } + ├─StreamTemporalJoin { type: Inner, predicate: stream.k = version1.k, output: [stream.k, stream.a1, stream.b1, version1.x1, stream._row_id, version1.k] } + │ ├─StreamExchange { dist: HashShard(stream.k) } + │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } + │ │ └─StreamTableScan { table: stream, columns: [stream.k, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.k) } + │ └─StreamTableScan { table: version1, columns: [version1.k, version1.x1], pk: [version1.k], dist: UpstreamHashShard(version1.k) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.k) } + └─StreamTableScan { table: version2, columns: [version2.k, version2.x2], pk: [version2.k], dist: UpstreamHashShard(version2.k) } - name: multi-way temporal join with different keys sql: | create table stream(id1 int, id2 int, a1 int, b1 int) APPEND ONLY; @@ -120,17 +125,18 @@ join version1 FOR SYSTEM_TIME AS OF PROCTIME() on stream.id1 = version1.id1 join version2 FOR SYSTEM_TIME AS OF PROCTIME() on stream.id2 = version2.id2 where a1 < 10; stream_plan: |- - StreamMaterialize { columns: [id1, x1, id2, x2, a1, b1, stream._row_id(hidden), version1.id1(hidden), version2.id2(hidden)], stream_key: [stream._row_id, version1.id1, id1, version2.id2, id2], pk_columns: [stream._row_id, version1.id1, id1, version2.id2, id2], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.id2 = version2.id2, output: 
[stream.id1, version1.x1, stream.id2, version2.x2, stream.a1, stream.b1, stream._row_id, version1.id1, version2.id2] } - ├─StreamExchange { dist: HashShard(stream.id2) } - │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } - │ ├─StreamExchange { dist: HashShard(stream.id1) } - │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } - │ └─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], pk: [version1.id1], dist: UpstreamHashShard(version1.id1) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } - └─StreamTableScan { table: version2, columns: [version2.id2, version2.x2], pk: [version2.id2], dist: UpstreamHashShard(version2.id2) } + StreamMaterialize { columns: [id1, x1, id2, x2, a1, b1, stream._row_id(hidden), version2.id2(hidden)], stream_key: [stream._row_id, id1, id2], pk_columns: [stream._row_id, id1, id2], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream.id2, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.id2 = version2.id2, output: [stream.id1, version1.x1, stream.id2, version2.x2, stream.a1, stream.b1, stream._row_id, version2.id2] } + ├─StreamExchange { dist: HashShard(stream.id2) } + │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } + │ ├─StreamExchange { dist: HashShard(stream.id1) } + │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } + │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], pk: 
[stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } + │ └─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], pk: [version1.id1], dist: UpstreamHashShard(version1.id1) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } + └─StreamTableScan { table: version2, columns: [version2.id2, version2.x2], pk: [version2.id2], dist: UpstreamHashShard(version2.id2) } - name: multi-way temporal join with different keys sql: | create table stream(id1 int, id2 int, a1 int, b1 int) APPEND ONLY; @@ -141,17 +147,18 @@ join version1 FOR SYSTEM_TIME AS OF PROCTIME() on stream.id1 = version1.id1 join version2 FOR SYSTEM_TIME AS OF PROCTIME() on stream.id2 = version2.id2 where a1 < 10; stream_plan: |- - StreamMaterialize { columns: [id1, x1, id2, x2, a1, b1, stream._row_id(hidden), version1.id1(hidden), version2.id2(hidden)], stream_key: [stream._row_id, version1.id1, id1, version2.id2, id2], pk_columns: [stream._row_id, version1.id1, id1, version2.id2, id2], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: Inner, predicate: stream.id2 = version2.id2, output: [stream.id1, version1.x1, stream.id2, version2.x2, stream.a1, stream.b1, stream._row_id, version1.id1, version2.id2] } - ├─StreamExchange { dist: HashShard(stream.id2) } - │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } - │ ├─StreamExchange { dist: HashShard(stream.id1) } - │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } - │ └─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], pk: [version1.id1], 
dist: UpstreamHashShard(version1.id1) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } - └─StreamTableScan { table: version2, columns: [version2.id2, version2.x2], pk: [version2.id2], dist: UpstreamHashShard(version2.id2) } + StreamMaterialize { columns: [id1, x1, id2, x2, a1, b1, stream._row_id(hidden), version2.id2(hidden)], stream_key: [stream._row_id, id1, id2], pk_columns: [stream._row_id, id1, id2], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream.id2, stream._row_id) } + └─StreamTemporalJoin { type: Inner, predicate: stream.id2 = version2.id2, output: [stream.id1, version1.x1, stream.id2, version2.x2, stream.a1, stream.b1, stream._row_id, version2.id2] } + ├─StreamExchange { dist: HashShard(stream.id2) } + │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } + │ ├─StreamExchange { dist: HashShard(stream.id1) } + │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } + │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } + │ └─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], pk: [version1.id1], dist: UpstreamHashShard(version1.id1) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } + └─StreamTableScan { table: version2, columns: [version2.id2, version2.x2], pk: [version2.id2], dist: UpstreamHashShard(version2.id2) } - name: temporal join with an index (distribution key size = 1) sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; @@ -160,11 +167,12 @@ select id1, a1, id2, a2 from stream left join idx2 FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2; stream_plan: |- StreamMaterialize { columns: [id1, a1, id2, a2, 
stream._row_id(hidden), stream.b1(hidden)], stream_key: [stream._row_id, id2, a1, stream.b1], pk_columns: [stream._row_id, id2, a1, stream.b1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } - ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } - └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } + └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } + ├─StreamExchange { dist: HashShard(stream.a1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } + └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: temporal join with an index (distribution key size = 2) sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; @@ -173,11 +181,12 @@ select id1, a1, id2, a2 from stream left join idx2 FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2; stream_plan: |- StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden)], stream_key: [stream._row_id, id2, a1, stream.b1], pk_columns: [stream._row_id, id2, a1, stream.b1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND 
stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } - ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } - └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } + └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } + ├─StreamExchange { dist: HashShard(stream.a1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } + └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: temporal join with an index (index column size = 1) sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; @@ -186,11 +195,12 @@ select id1, a1, id2, a2 from stream left join idx2 FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2; stream_plan: |- StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden)], stream_key: [stream._row_id, id2, stream.b1, a1], pk_columns: [stream._row_id, id2, stream.b1, a1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.b1 = idx2.b2 AND (stream.a1 = idx2.a2), output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } - ├─StreamExchange { dist: HashShard(stream.b1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], 
pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.b2) } - └─StreamTableScan { table: idx2, columns: [idx2.b2, idx2.id2, idx2.a2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.b2) } + └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.b1 = idx2.b2 AND (stream.a1 = idx2.a2), output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } + ├─StreamExchange { dist: HashShard(stream.b1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.b2) } + └─StreamTableScan { table: idx2, columns: [idx2.b2, idx2.id2, idx2.a2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.b2) } - name: temporal join with singleton table sql: | create table t (a int) append only; @@ -212,11 +222,12 @@ select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2; stream_plan: |- StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden)], stream_key: [stream._row_id, id2, a1, stream.b1], pk_columns: [stream._row_id, id2, a1, stream.b1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx.a2 AND stream.b1 = idx.b2, output: [stream.id1, stream.a1, idx.id2, idx.a2, stream._row_id, stream.b1] } - ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx.a2) } - └─StreamTableScan { table: idx, columns: [idx.id2, idx.a2, idx.b2], pk: [idx.id2], dist: UpstreamHashShard(idx.a2) } + └─StreamExchange { dist: 
HashShard(stream.a1, idx.id2, stream._row_id, stream.b1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx.a2 AND stream.b1 = idx.b2, output: [stream.id1, stream.a1, idx.id2, idx.a2, stream._row_id, stream.b1] } + ├─StreamExchange { dist: HashShard(stream.a1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx.a2) } + └─StreamTableScan { table: idx, columns: [idx.id2, idx.a2, idx.b2], pk: [idx.id2], dist: UpstreamHashShard(idx.a2) } - name: index selection for temporal join (with two indexes) and should choose the index with a longer prefix.. sql: | create table stream(id1 int, a1 int, b1 int) APPEND ONLY; @@ -226,11 +237,12 @@ select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2; stream_plan: |- StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden)], stream_key: [stream._row_id, id2, a1, stream.b1], pk_columns: [stream._row_id, id2, a1, stream.b1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } - ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } - └─StreamTableScan { table: idx2, columns: [idx2.id2, idx2.a2, idx2.b2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } + └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, 
stream._row_id, stream.b1] } + ├─StreamExchange { dist: HashShard(stream.a1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } + └─StreamTableScan { table: idx2, columns: [idx2.id2, idx2.a2, idx2.b2], pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: index selection for temporal join (with three indexes) and should choose primary table. sql: | create table stream(id1 int, a1 int, b1 int, c1 int) APPEND ONLY; @@ -240,12 +252,13 @@ create index idx3 on version (c2); select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF PROCTIME() on a1 = a2 and b1 = b2 and c1 = c2 and id1 = id2; stream_plan: |- - StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden), stream.c1(hidden)], stream_key: [stream._row_id, id2, id1, a1, stream.b1, stream.c1], pk_columns: [stream._row_id, id2, id1, a1, stream.b1, stream.c1], pk_conflict: NoCheck } - └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2 AND (stream.a1 = version.a2) AND (stream.b1 = version.b2) AND (stream.c1 = version.c2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id, stream.b1, stream.c1] } - ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream.c1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } - └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } - └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2, version.c2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } + StreamMaterialize { columns: [id1, a1, id2, a2, stream._row_id(hidden), stream.b1(hidden), stream.c1(hidden)], stream_key: [stream._row_id, id1, a1, stream.b1, stream.c1], pk_columns: 
[stream._row_id, id1, a1, stream.b1, stream.c1], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(stream.id1, stream.a1, stream._row_id, stream.b1, stream.c1) } + └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2 AND (stream.a1 = version.a2) AND (stream.b1 = version.b2) AND (stream.c1 = version.c2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id, stream.b1, stream.c1] } + ├─StreamExchange { dist: HashShard(stream.id1) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream.c1, stream._row_id], pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } + └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2, version.c2], pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: index selection for temporal join (two index) and no one matches. sql: | create table stream(id1 int, a1 int, b1 int, c1 int) APPEND ONLY; diff --git a/src/frontend/planner_test/tests/testdata/output/tpch.yaml b/src/frontend/planner_test/tests/testdata/output/tpch.yaml index 798765115e48d..52c4a4b813198 100644 --- a/src/frontend/planner_test/tests/testdata/output/tpch.yaml +++ b/src/frontend/planner_test/tests/testdata/output/tpch.yaml @@ -168,7 +168,7 @@ Fragment 1 StreamGroupTopN { order: [lineitem.l_returnflag ASC, lineitem.l_linestatus ASC], limit: 1, offset: 0, group_key: [$expr6] } { state table: 1 } └── StreamProject { exprs: [lineitem.l_returnflag, lineitem.l_linestatus, sum(lineitem.l_quantity), sum(lineitem.l_extendedprice), sum($expr1), sum($expr2), (sum(lineitem.l_quantity) / count(lineitem.l_quantity)::Decimal) as $expr3, (sum(lineitem.l_extendedprice) / count(lineitem.l_extendedprice)::Decimal) as $expr4, (sum(lineitem.l_discount) / count(lineitem.l_discount)::Decimal) as $expr5, count, Vnode(lineitem.l_returnflag, lineitem.l_linestatus) as $expr6] } - └── StreamHashAgg 
{ group_key: [lineitem.l_returnflag, lineitem.l_linestatus], aggs: [sum(lineitem.l_quantity), sum(lineitem.l_extendedprice), sum($expr1), sum($expr2), count(lineitem.l_quantity), count(lineitem.l_extendedprice), sum(lineitem.l_discount), count(lineitem.l_discount), count] } { result table: 2, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [lineitem.l_returnflag, lineitem.l_linestatus], aggs: [sum(lineitem.l_quantity), sum(lineitem.l_extendedprice), sum($expr1), sum($expr2), count(lineitem.l_quantity), count(lineitem.l_extendedprice), sum(lineitem.l_discount), count(lineitem.l_discount), count] } { intermediate state table: 2, state tables: [], distinct tables: [] } └── StreamExchange Hash([0, 1]) from 2 Fragment 2 @@ -185,9 +185,9 @@ Table 2 { columns: [ lineitem_l_returnflag, lineitem_l_linestatus, sum(lineitem_l_quantity), sum(lineitem_l_extendedprice), sum($expr1), sum($expr2), count(lineitem_l_quantity), count(lineitem_l_extendedprice), sum(lineitem_l_discount), count(lineitem_l_discount), count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2, 3, 4, 5, 6, 7, 8, 9, 10 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } - Table 3 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 3 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ l_returnflag, l_linestatus, sum_qty, sum_base_price, sum_disc_price, sum_charge, avg_qty, avg_price, avg_disc, count_order ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ l_returnflag, l_linestatus, sum_qty, 
sum_base_price, sum_disc_price, sum_charge, avg_qty, avg_price, avg_disc, count_order ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], distribution key: [], read pk prefix len hint: 2 } - id: tpch_q2 before: @@ -321,13 +321,13 @@ └─BatchFilter { predicate: IsNotNull(partsupp.ps_partkey) } └─BatchScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], distribution: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } stream_plan: |- - StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey(hidden), nation.n_nationkey(hidden), supplier.s_suppkey(hidden), part.p_partkey(hidden), partsupp.ps_partkey(hidden), partsupp.ps_suppkey(hidden), min(partsupp.ps_supplycost)(hidden)], stream_key: [region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)], pk_columns: [s_acctbal, n_name, s_name, p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)], pk_conflict: NoCheck } - └─StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } + StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey(hidden), nation.n_nationkey(hidden), supplier.s_suppkey(hidden), part.p_partkey(hidden), partsupp.ps_suppkey(hidden), min(partsupp.ps_supplycost)(hidden)], stream_key: [region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, min(partsupp.ps_supplycost)], pk_columns: [s_acctbal, n_name, s_name, 
p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, min(partsupp.ps_supplycost)], pk_conflict: NoCheck } + └─StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } └─StreamTopN { order: [supplier.s_acctbal DESC, nation.n_name ASC, supplier.s_name ASC, part.p_partkey ASC], limit: 100, offset: 0 } └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [supplier.s_acctbal DESC, nation.n_name ASC, supplier.s_name ASC, part.p_partkey ASC], limit: 100, offset: 0, group_key: [$expr1] } - └─StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost), Vnode(supplier.s_suppkey) as $expr1] } - └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } + └─StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost), Vnode(supplier.s_suppkey) as $expr1] } + └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, 
part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey, output: [nation.n_name, supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, supplier.s_acctbal, supplier.s_comment, region.r_regionkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } @@ -341,10 +341,10 @@ │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_acctbal, supplier.s_comment], pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } - └─StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey AND min(partsupp.ps_supplycost) = partsupp.ps_supplycost, output: [part.p_partkey, part.p_mfgr, partsupp.ps_suppkey, part.p_partkey, min(partsupp.ps_supplycost), partsupp.ps_partkey] } + └─StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey AND min(partsupp.ps_supplycost) = partsupp.ps_supplycost, output: [part.p_partkey, part.p_mfgr, partsupp.ps_suppkey, part.p_partkey, min(partsupp.ps_supplycost)] } ├─StreamProject { exprs: [part.p_partkey, min(partsupp.ps_supplycost)] } │ └─StreamHashAgg { group_key: [part.p_partkey], aggs: [min(partsupp.ps_supplycost), count] } - │ └─StreamHashJoin { type: LeftOuter, predicate: part.p_partkey IS NOT DISTINCT FROM partsupp.ps_partkey, output: [part.p_partkey, partsupp.ps_supplycost, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_suppkey, region.r_regionkey, nation.n_nationkey, supplier.s_nationkey] } + │ └─StreamHashJoin { 
type: LeftOuter, predicate: part.p_partkey IS NOT DISTINCT FROM partsupp.ps_partkey, output: [part.p_partkey, partsupp.ps_supplycost, partsupp.ps_partkey, partsupp.ps_suppkey, region.r_regionkey, supplier.s_nationkey] } │ ├─StreamExchange { dist: HashShard(part.p_partkey) } │ │ └─StreamProject { exprs: [part.p_partkey] } │ │ └─StreamHashAgg { group_key: [part.p_partkey], aggs: [count] } @@ -352,7 +352,7 @@ │ │ └─StreamFilter { predicate: (part.p_size = 4:Int32) AND Like(part.p_type, '%TIN':Varchar) } │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_type, part.p_size], pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } │ └─StreamExchange { dist: HashShard(partsupp.ps_partkey) } - │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_suppkey, supplier.s_nationkey, region.r_regionkey, nation.n_nationkey] } + │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_nationkey, region.r_regionkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } @@ -377,16 +377,16 @@ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey(hidden), nation.n_nationkey(hidden), 
supplier.s_suppkey(hidden), part.p_partkey(hidden), partsupp.ps_partkey(hidden), partsupp.ps_suppkey(hidden), min(partsupp.ps_supplycost)(hidden)], stream_key: [region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)], pk_columns: [s_acctbal, n_name, s_name, p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)], pk_conflict: NoCheck } + StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey(hidden), nation.n_nationkey(hidden), supplier.s_suppkey(hidden), part.p_partkey(hidden), partsupp.ps_suppkey(hidden), min(partsupp.ps_supplycost)(hidden)], stream_key: [region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, min(partsupp.ps_supplycost)], pk_columns: [s_acctbal, n_name, s_name, p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, min(partsupp.ps_supplycost)], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } + └── StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } └── StreamTopN { order: [supplier.s_acctbal DESC, nation.n_name ASC, supplier.s_name ASC, part.p_partkey ASC], limit: 100, offset: 0 } { state table: 0 } └── StreamExchange Single from 1 Fragment 1 StreamGroupTopN { order: 
[supplier.s_acctbal DESC, nation.n_name ASC, supplier.s_name ASC, part.p_partkey ASC], limit: 100, offset: 0, group_key: [$expr1] } { state table: 1 } - └── StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost), Vnode(supplier.s_suppkey) as $expr1] } - └── StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } { left table: 2, right table: 4, left degree table: 3, right degree table: 5 } + └── StreamProject { exprs: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost), Vnode(supplier.s_suppkey) as $expr1] } + └── StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_acctbal, supplier.s_name, nation.n_name, part.p_partkey, part.p_mfgr, supplier.s_address, supplier.s_phone, supplier.s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost)] } { left table: 2, right table: 4, left degree table: 3, right degree table: 5 } ├── StreamExchange Hash([1]) from 2 └── StreamExchange Hash([2]) from 7 @@ -418,10 +418,10 @@ └── BatchPlanNode Fragment 7 - StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey AND min(partsupp.ps_supplycost) = 
partsupp.ps_supplycost, output: [part.p_partkey, part.p_mfgr, partsupp.ps_suppkey, part.p_partkey, min(partsupp.ps_supplycost), partsupp.ps_partkey] } { left table: 17, right table: 19, left degree table: 18, right degree table: 20 } + StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey AND min(partsupp.ps_supplycost) = partsupp.ps_supplycost, output: [part.p_partkey, part.p_mfgr, partsupp.ps_suppkey, part.p_partkey, min(partsupp.ps_supplycost)] } { left table: 17, right table: 19, left degree table: 18, right degree table: 20 } ├── StreamProject { exprs: [part.p_partkey, min(partsupp.ps_supplycost)] } - │ └── StreamHashAgg { group_key: [part.p_partkey], aggs: [min(partsupp.ps_supplycost), count] } { result table: 22, state tables: [ 21 ], distinct tables: [] } - │ └── StreamHashJoin { type: LeftOuter, predicate: part.p_partkey IS NOT DISTINCT FROM partsupp.ps_partkey, output: [part.p_partkey, partsupp.ps_supplycost, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_suppkey, region.r_regionkey, nation.n_nationkey, supplier.s_nationkey] } { left table: 23, right table: 25, left degree table: 24, right degree table: 26 } + │ └── StreamHashAgg { group_key: [part.p_partkey], aggs: [min(partsupp.ps_supplycost), count] } { intermediate state table: 22, state tables: [ 21 ], distinct tables: [] } + │ └── StreamHashJoin { type: LeftOuter, predicate: part.p_partkey IS NOT DISTINCT FROM partsupp.ps_partkey, output: [part.p_partkey, partsupp.ps_supplycost, partsupp.ps_partkey, partsupp.ps_suppkey, region.r_regionkey, supplier.s_nationkey] } { left table: 23, right table: 25, left degree table: 24, right degree table: 26 } │ ├── StreamExchange Hash([0]) from 8 │ └── StreamExchange Hash([0]) from 9 └── StreamHashJoin { type: Inner, predicate: part.p_partkey = partsupp.ps_partkey, output: [part.p_partkey, part.p_mfgr, partsupp.ps_suppkey, partsupp.ps_supplycost, partsupp.ps_partkey] } { left table: 45, right table: 47, left degree 
table: 46, right degree table: 48 } @@ -430,7 +430,7 @@ Fragment 8 StreamProject { exprs: [part.p_partkey] } - └── StreamHashAgg { group_key: [part.p_partkey], aggs: [count] } { result table: 27, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [part.p_partkey], aggs: [count] } { intermediate state table: 27, state tables: [], distinct tables: [] } └── StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: (part.p_size = 4:Int32) AND Like(part.p_type, '%TIN':Varchar) } └── Chain { table: part, columns: [part.p_partkey, part.p_type, part.p_size], pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { state table: 28 } @@ -438,7 +438,7 @@ └── BatchPlanNode Fragment 9 - StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_suppkey, supplier.s_nationkey, region.r_regionkey, nation.n_nationkey] } { left table: 29, right table: 31, left degree table: 30, right degree table: 32 } + StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_nationkey, region.r_regionkey, nation.n_nationkey] } { left table: 29, right table: 31, left degree table: 30, right degree table: 32 } ├── StreamExchange Hash([2]) from 10 └── StreamExchange Hash([0]) from 13 @@ -487,17 +487,17 @@ ├── Upstream └── BatchPlanNode - Table 0 { columns: [ supplier_s_acctbal, supplier_s_name, nation_n_name, part_p_partkey, part_p_mfgr, supplier_s_address, supplier_s_phone, supplier_s_comment, region_r_regionkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey_0, partsupp_ps_partkey, partsupp_ps_suppkey, min(partsupp_ps_supplycost), $expr1 ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $12 ASC, $13 ASC, $14 ASC, $15 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 
], distribution key: [], read pk prefix len hint: 0 } + Table 0 { columns: [ supplier_s_acctbal, supplier_s_name, nation_n_name, part_p_partkey, part_p_mfgr, supplier_s_address, supplier_s_phone, supplier_s_comment, region_r_regionkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey_0, partsupp_ps_suppkey, min(partsupp_ps_supplycost), $expr1 ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $13 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ], distribution key: [], read pk prefix len hint: 0 } - Table 1 { columns: [ supplier_s_acctbal, supplier_s_name, nation_n_name, part_p_partkey, part_p_mfgr, supplier_s_address, supplier_s_phone, supplier_s_comment, region_r_regionkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey_0, partsupp_ps_partkey, partsupp_ps_suppkey, min(partsupp_ps_supplycost), $expr1 ], primary key: [ $15 ASC, $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $12 ASC, $13 ASC, $14 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], distribution key: [ 10 ], read pk prefix len hint: 1, vnode column idx: 15 } + Table 1 { columns: [ supplier_s_acctbal, supplier_s_name, nation_n_name, part_p_partkey, part_p_mfgr, supplier_s_address, supplier_s_phone, supplier_s_comment, region_r_regionkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey_0, partsupp_ps_suppkey, min(partsupp_ps_supplycost), $expr1 ], primary key: [ $14 ASC, $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $13 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ], distribution key: [ 10 ], read pk prefix len hint: 1, vnode column idx: 14 } Table 2 { columns: [ nation_n_name, supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, supplier_s_acctbal, supplier_s_comment, region_r_regionkey, nation_n_nationkey ], primary key: [ $1 ASC, $7 ASC, $8 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ], distribution 
key: [ 1 ], read pk prefix len hint: 1 } Table 3 { columns: [ supplier_s_suppkey, region_r_regionkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 4 { columns: [ part_p_partkey, part_p_mfgr, partsupp_ps_suppkey, part_p_partkey_0, min(partsupp_ps_supplycost), partsupp_ps_partkey ], primary key: [ $2 ASC, $3 ASC, $0 ASC, $5 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 2 ], read pk prefix len hint: 1 } + Table 4 { columns: [ part_p_partkey, part_p_mfgr, partsupp_ps_suppkey, part_p_partkey_0, min(partsupp_ps_supplycost) ], primary key: [ $2 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 2 ], read pk prefix len hint: 1 } - Table 5 { columns: [ partsupp_ps_suppkey, part_p_partkey, part_p_partkey_0, partsupp_ps_partkey, min(partsupp_ps_supplycost), _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 5 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 5 { columns: [ partsupp_ps_suppkey, part_p_partkey, min(partsupp_ps_supplycost), _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 6 { columns: [ nation_n_nationkey, nation_n_name, region_r_regionkey ], primary key: [ $0 ASC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -515,21 +515,21 @@ Table 13 { columns: [ nation_n_regionkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 14 { columns: [ vnode, r_regionkey, region_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 14 { columns: [ vnode, r_regionkey, region_backfill_finished, region_row_count ], primary key: [ $0 ASC ], value 
indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 16 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 16 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 17 { columns: [ part_p_partkey, min(partsupp_ps_supplycost) ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 2 } Table 18 { columns: [ part_p_partkey, min(partsupp_ps_supplycost), _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - Table 19 { columns: [ part_p_partkey, part_p_mfgr, partsupp_ps_suppkey, partsupp_ps_supplycost, partsupp_ps_partkey ], primary key: [ $0 ASC, $3 ASC, $4 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 2 } + Table 19 { columns: [ part_p_partkey, part_p_mfgr, partsupp_ps_suppkey, partsupp_ps_supplycost, partsupp_ps_partkey ], primary key: [ $0 ASC, $3 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - Table 20 { columns: [ part_p_partkey, partsupp_ps_supplycost, partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], 
distribution key: [ 0 ], read pk prefix len hint: 2 } + Table 20 { columns: [ part_p_partkey, partsupp_ps_supplycost, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - Table 21 { columns: [ part_p_partkey, partsupp_ps_supplycost, partsupp_ps_partkey, partsupp_ps_suppkey, supplier_s_suppkey, region_r_regionkey, nation_n_nationkey, supplier_s_nationkey ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 21 { columns: [ part_p_partkey, partsupp_ps_supplycost, partsupp_ps_suppkey, region_r_regionkey, supplier_s_nationkey ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 22 { columns: [ part_p_partkey, min(partsupp_ps_supplycost), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -537,17 +537,17 @@ Table 24 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 25 { columns: [ partsupp_ps_partkey, partsupp_ps_supplycost, partsupp_ps_suppkey, supplier_s_suppkey, supplier_s_nationkey, region_r_regionkey, nation_n_nationkey ], primary key: [ $0 ASC, $2 ASC, $3 ASC, $5 ASC, $6 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 25 { columns: [ partsupp_ps_partkey, partsupp_ps_supplycost, partsupp_ps_suppkey, supplier_s_nationkey, region_r_regionkey, nation_n_nationkey ], primary key: [ $0 ASC, $2 ASC, $4 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 26 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, supplier_s_suppkey, region_r_regionkey, 
nation_n_nationkey, supplier_s_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 6 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 26 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, region_r_regionkey, supplier_s_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 27 { columns: [ part_p_partkey, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 28 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 28 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 29 { columns: [ partsupp_ps_partkey, partsupp_ps_supplycost, supplier_s_nationkey, partsupp_ps_suppkey, supplier_s_suppkey ], primary key: [ $2 ASC, $0 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 2 ], read pk prefix len hint: 1 } + Table 29 { columns: [ partsupp_ps_partkey, partsupp_ps_supplycost, supplier_s_nationkey, partsupp_ps_suppkey, supplier_s_suppkey ], primary key: [ $2 ASC, $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 2 ], read pk prefix len hint: 1 } - Table 30 { columns: [ supplier_s_nationkey, partsupp_ps_partkey, partsupp_ps_suppkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 30 { columns: [ supplier_s_nationkey, partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 31 { 
columns: [ nation_n_nationkey, region_r_regionkey ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -561,9 +561,9 @@ Table 36 { columns: [ supplier_s_suppkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 37 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 37 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 38 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 38 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 39 { columns: [ region_r_regionkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -573,9 +573,9 @@ Table 42 { columns: [ nation_n_regionkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 43 { columns: [ vnode, r_regionkey, region_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 43 { columns: [ vnode, r_regionkey, region_backfill_finished, region_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 44 { columns: [ vnode, 
n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 44 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 45 { columns: [ part_p_partkey, part_p_mfgr ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -585,11 +585,11 @@ Table 48 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 49 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 49 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 50 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 50 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost) ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $12 ASC, $13 ASC, $14 ASC ], value indices: [ 0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ], distribution key: [], read pk prefix len hint: 8 } + Table 4294967294 { columns: [ s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, partsupp.ps_suppkey, min(partsupp.ps_supplycost) ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $13 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ], distribution key: [], read pk prefix len hint: 9 } - id: tpch_q3 before: @@ -696,7 +696,7 @@ Fragment 1 StreamGroupTopN { order: [sum($expr1) DESC, orders.o_orderdate ASC], limit: 10, offset: 0, group_key: [$expr2] } { state table: 1 } └── StreamProject { exprs: [lineitem.l_orderkey, sum($expr1), orders.o_orderdate, orders.o_shippriority, Vnode(lineitem.l_orderkey, orders.o_orderdate, orders.o_shippriority) as $expr2] } - └── StreamHashAgg { group_key: [lineitem.l_orderkey, orders.o_orderdate, orders.o_shippriority], aggs: [sum($expr1), count] } { result table: 2, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [lineitem.l_orderkey, orders.o_orderdate, orders.o_shippriority], aggs: [sum($expr1), count] } { intermediate state table: 2, state tables: [], distinct tables: [] } └── StreamExchange Hash([0, 1, 2]) from 2 Fragment 2 @@ -735,7 +735,7 @@ ├── Upstream └── BatchPlanNode - Table 0 { columns: [ lineitem_l_orderkey, sum($expr1), orders_o_orderdate, orders_o_shippriority, $expr2 ], primary key: [ $1 DESC, $2 ASC, $0 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [], read pk prefix len hint: 0 } + Table 0 { columns: [ lineitem_l_orderkey, sum($expr1), orders_o_orderdate, orders_o_shippriority, $expr2 ], primary key: [ $1 DESC, $2 ASC, $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [], read pk prefix len hint: 0 } Table 1 ├── columns: [ lineitem_l_orderkey, sum($expr1), orders_o_orderdate, orders_o_shippriority, 
$expr2 ] @@ -763,13 +763,13 @@ Table 10 { columns: [ orders_o_custkey, orders_o_orderkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 11 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 11 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 13 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 13 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ l_orderkey, revenue, o_orderdate, o_shippriority ], primary key: [ $1 DESC, $2 ASC, $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 3 } + Table 4294967294 { columns: [ l_orderkey, revenue, o_orderdate, o_shippriority ], primary key: [ $1 DESC, $2 ASC, $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 4 } - id: tpch_q4 before: @@ -859,7 +859,7 @@ StreamGroupTopN { order: 
[orders.o_orderpriority ASC], limit: 1, offset: 0, group_key: [$expr1] } { state table: 1 } └── StreamProject { exprs: [orders.o_orderpriority, count, Vnode(orders.o_orderpriority) as $expr1] } └── StreamHashAgg { group_key: [orders.o_orderpriority], aggs: [count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 2 @@ -903,11 +903,11 @@ Table 6 { columns: [ lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 7 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 7 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 8 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 8 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ o_orderpriority, order_count ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ o_orderpriority, order_count ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q5 before: @@ -1005,8 +1005,8 @@ └─StreamProject { exprs: [nation.n_name, sum($expr1), Vnode(nation.n_name) as $expr2] } └─StreamHashAgg { group_key: [nation.n_name], aggs: 
[sum($expr1), count] } └─StreamExchange { dist: HashShard(nation.n_name) } - └─StreamProject { exprs: [nation.n_name, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey, customer.c_nationkey] } - └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey AND nation.n_nationkey = customer.c_nationkey, output: [lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey, customer.c_nationkey] } + └─StreamProject { exprs: [nation.n_name, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_linenumber, lineitem.l_suppkey] } + └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey AND nation.n_nationkey = customer.c_nationkey, output: [lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_linenumber, lineitem.l_suppkey, customer.c_nationkey] } ├─StreamExchange { dist: HashShard(nation.n_nationkey, nation.n_nationkey) } │ └─StreamHashJoin { type: Inner, predicate: region.r_regionkey = nation.n_regionkey, output: [nation.n_nationkey, nation.n_name, region.r_regionkey] } │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } @@ -1017,7 +1017,7 @@ │ └─StreamFilter { predicate: (nation.n_nationkey = nation.n_nationkey) } │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: 
HashShard(customer.c_nationkey, supplier.s_nationkey) } - └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey AND customer.c_nationkey = supplier.s_nationkey, output: [customer.c_nationkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey] } + └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey AND customer.c_nationkey = supplier.s_nationkey, output: [customer.c_nationkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_suppkey] } ├─StreamExchange { dist: HashShard(orders.o_orderkey, customer.c_nationkey) } │ └─StreamHashJoin { type: Inner, predicate: orders.o_custkey = customer.c_custkey, output: [orders.o_orderkey, customer.c_nationkey, orders.o_custkey, customer.c_custkey] } │ ├─StreamExchange { dist: HashShard(orders.o_custkey) } @@ -1044,14 +1044,14 @@ StreamGroupTopN { order: [sum($expr1) DESC], limit: 1, offset: 0, group_key: [$expr2] } { state table: 1 } └── StreamProject { exprs: [nation.n_name, sum($expr1), Vnode(nation.n_name) as $expr2] } └── StreamHashAgg { group_key: [nation.n_name], aggs: [sum($expr1), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 2 Fragment 2 - StreamProject { exprs: [nation.n_name, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey, customer.c_nationkey] } - └── StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey AND nation.n_nationkey = customer.c_nationkey, output: 
[lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey, customer.c_nationkey] } + StreamProject { exprs: [nation.n_name, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_linenumber, lineitem.l_suppkey] } + └── StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey AND nation.n_nationkey = customer.c_nationkey, output: [lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, region.r_regionkey, nation.n_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_linenumber, lineitem.l_suppkey, customer.c_nationkey] } ├── left table: 3 ├── right table: 5 ├── left degree table: 4 @@ -1078,7 +1078,7 @@ └── BatchPlanNode Fragment 6 - StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey AND customer.c_nationkey = supplier.s_nationkey, output: [customer.c_nationkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, orders.o_orderkey, customer.c_custkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, supplier.s_suppkey, lineitem.l_suppkey] } + StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey AND customer.c_nationkey = supplier.s_nationkey, output: [customer.c_nationkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_suppkey] } ├── left table: 13 ├── right table: 15 ├── left degree table: 14 @@ -1129,13 +1129,13 @@ Table 4 { columns: [ nation_n_nationkey, nation_n_nationkey_0, region_r_regionkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 1, 0 ], read pk prefix len hint: 2 } Table 5 - ├── 
columns: [ customer_c_nationkey, lineitem_l_extendedprice, lineitem_l_discount, supplier_s_nationkey, orders_o_orderkey, customer_c_custkey, orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, supplier_s_suppkey, lineitem_l_suppkey ] - ├── primary key: [ $3 ASC, $0 ASC, $4 ASC, $5 ASC, $6 ASC, $7 ASC, $8 ASC, $9 ASC, $10 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] + ├── columns: [ customer_c_nationkey, lineitem_l_extendedprice, lineitem_l_discount, supplier_s_nationkey, orders_o_orderkey, orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, lineitem_l_suppkey ] + ├── primary key: [ $3 ASC, $0 ASC, $4 ASC, $5 ASC, $7 ASC, $8 ASC ] + ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ] ├── distribution key: [ 0, 3 ] └── read pk prefix len hint: 2 - Table 6 { columns: [ supplier_s_nationkey, customer_c_nationkey, orders_o_orderkey, customer_c_custkey, orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, supplier_s_suppkey, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC, $7 ASC, $8 ASC ], value indices: [ 9 ], distribution key: [ 1, 0 ], read pk prefix len hint: 2 } + Table 6 { columns: [ supplier_s_nationkey, customer_c_nationkey, orders_o_orderkey, orders_o_custkey, lineitem_l_linenumber, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 6 ], distribution key: [ 1, 0 ], read pk prefix len hint: 2 } Table 7 { columns: [ region_r_regionkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -1145,17 +1145,17 @@ Table 10 { columns: [ nation_n_regionkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 11 { columns: [ vnode, r_regionkey, region_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len 
hint: 1, vnode column idx: 0 } + Table 11 { columns: [ vnode, r_regionkey, region_backfill_finished, region_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 13 { columns: [ orders_o_orderkey, customer_c_nationkey, orders_o_custkey, customer_c_custkey ], primary key: [ $0 ASC, $1 ASC, $3 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } + Table 13 { columns: [ orders_o_orderkey, customer_c_nationkey, orders_o_custkey, customer_c_custkey ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } - Table 14 { columns: [ orders_o_orderkey, customer_c_nationkey, customer_c_custkey, orders_o_custkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } + Table 14 { columns: [ orders_o_orderkey, customer_c_nationkey, orders_o_custkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } - Table 15 { columns: [ lineitem_l_orderkey, lineitem_l_extendedprice, lineitem_l_discount, supplier_s_nationkey, lineitem_l_linenumber, lineitem_l_suppkey, supplier_s_suppkey ], primary key: [ $0 ASC, $3 ASC, $4 ASC, $6 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 0, 3 ], read pk prefix len hint: 2 } + Table 15 { columns: [ lineitem_l_orderkey, lineitem_l_extendedprice, 
lineitem_l_discount, supplier_s_nationkey, lineitem_l_linenumber, lineitem_l_suppkey, supplier_s_suppkey ], primary key: [ $0 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 0, 3 ], read pk prefix len hint: 2 } - Table 16 { columns: [ lineitem_l_orderkey, supplier_s_nationkey, lineitem_l_linenumber, supplier_s_suppkey, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 5 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } + Table 16 { columns: [ lineitem_l_orderkey, supplier_s_nationkey, lineitem_l_linenumber, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } Table 17 { columns: [ orders_o_orderkey, orders_o_custkey ], primary key: [ $1 ASC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } @@ -1165,9 +1165,9 @@ Table 20 { columns: [ customer_c_custkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 21 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 21 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 22 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 22 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 23 { columns: [ 
lineitem_l_orderkey, lineitem_l_suppkey, lineitem_l_extendedprice, lineitem_l_discount, lineitem_l_linenumber ], primary key: [ $1 ASC, $0 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 1 ], read pk prefix len hint: 1 } @@ -1177,11 +1177,11 @@ Table 26 { columns: [ supplier_s_suppkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 27 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 27 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 28 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 28 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ n_name, revenue ], primary key: [ $1 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ n_name, revenue ], primary key: [ $1 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q6 before: @@ -1228,7 +1228,7 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [sum(sum($expr1))] } └── StreamSimpleAgg { aggs: [sum(sum($expr1)), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Single from 1 @@ -1244,7 +1244,7 @@ Table 0 { columns: [ sum(sum($expr1)), count ], primary key: [], 
value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } - Table 1 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 1 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ revenue ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -1355,7 +1355,7 @@ └─StreamProject { exprs: [nation.n_name, nation.n_name, $expr1, sum($expr2), Vnode(nation.n_name, nation.n_name, $expr1) as $expr3] } └─StreamHashAgg { group_key: [nation.n_name, nation.n_name, $expr1], aggs: [sum($expr2), count] } └─StreamExchange { dist: HashShard(nation.n_name, nation.n_name, $expr1) } - └─StreamProject { exprs: [nation.n_name, nation.n_name, Extract('YEAR':Varchar, lineitem.l_shipdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_nationkey, supplier.s_suppkey, lineitem.l_orderkey, lineitem.l_linenumber, nation.n_nationkey, customer.c_custkey, orders.o_orderkey] } + └─StreamProject { exprs: [nation.n_name, nation.n_name, Extract('YEAR':Varchar, lineitem.l_shipdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_nationkey, supplier.s_suppkey, lineitem.l_orderkey, lineitem.l_linenumber, nation.n_nationkey, customer.c_custkey] } └─StreamFilter { predicate: (((nation.n_name = 'ROMANIA':Varchar) AND (nation.n_name = 'IRAN':Varchar)) OR ((nation.n_name = 'IRAN':Varchar) AND (nation.n_name = 'ROMANIA':Varchar))) } └─StreamHashJoin { type: Inner, predicate: lineitem.l_orderkey = orders.o_orderkey, output: all } ├─StreamExchange { dist: HashShard(lineitem.l_orderkey) } @@ -1391,13 +1391,13 @@ 
StreamGroupTopN { order: [nation.n_name ASC, nation.n_name ASC, $expr1 ASC], limit: 1, offset: 0, group_key: [$expr3] } { state table: 1 } └── StreamProject { exprs: [nation.n_name, nation.n_name, $expr1, sum($expr2), Vnode(nation.n_name, nation.n_name, $expr1) as $expr3] } └── StreamHashAgg { group_key: [nation.n_name, nation.n_name, $expr1], aggs: [sum($expr2), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0, 1, 2]) from 2 Fragment 2 - StreamProject { exprs: [nation.n_name, nation.n_name, Extract('YEAR':Varchar, lineitem.l_shipdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_nationkey, supplier.s_suppkey, lineitem.l_orderkey, lineitem.l_linenumber, nation.n_nationkey, customer.c_custkey, orders.o_orderkey] } + StreamProject { exprs: [nation.n_name, nation.n_name, Extract('YEAR':Varchar, lineitem.l_shipdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_nationkey, supplier.s_suppkey, lineitem.l_orderkey, lineitem.l_linenumber, nation.n_nationkey, customer.c_custkey] } └── StreamFilter { predicate: (((nation.n_name = 'ROMANIA':Varchar) AND (nation.n_name = 'IRAN':Varchar)) OR ((nation.n_name = 'IRAN':Varchar) AND (nation.n_name = 'ROMANIA':Varchar))) } └── StreamHashJoin { type: Inner, predicate: lineitem.l_orderkey = orders.o_orderkey, output: all } { left table: 3, right table: 5, left degree table: 4, right degree table: 6 } ├── StreamExchange Hash([1]) from 3 @@ -1429,7 +1429,8 @@ Fragment 7 StreamFilter { predicate: (lineitem.l_shipdate >= '1983-01-01':Date) AND (lineitem.l_shipdate <= '2000-12-31':Date) } - └── Chain { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, 
lineitem.l_linenumber) } { state table: 17 } + └── Chain { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + ├── state table: 17 ├── Upstream └── BatchPlanNode @@ -1493,11 +1494,11 @@ Table 14 { columns: [ supplier_s_nationkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 16 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 16 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 17 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 17 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 18 { columns: [ nation_n_name, customer_c_custkey, nation_n_nationkey ], primary key: [ $1 ASC, $2 ASC ], 
value indices: [ 0, 1, 2 ], distribution key: [ 1 ], read pk prefix len hint: 1 } @@ -1515,13 +1516,13 @@ Table 25 { columns: [ customer_c_nationkey, customer_c_custkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 26 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 26 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 27 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 27 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 28 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 28 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ supp_nation, cust_nation, l_year, revenue ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ supp_nation, cust_nation, l_year, revenue ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 3 } - id: tpch_q8 before: @@ -1645,9 +1646,9 @@ └─StreamProject { 
exprs: [$expr1, (sum($expr3) / sum($expr2)) as $expr4, Vnode($expr1) as $expr5] } └─StreamHashAgg { group_key: [$expr1], aggs: [sum($expr3), sum($expr2), count] } └─StreamExchange { dist: HashShard($expr1) } - └─StreamProject { exprs: [$expr1, Case((nation.n_name = 'IRAN':Varchar), $expr2, 0:Decimal) as $expr3, $expr2, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └─StreamProject { exprs: [Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [lineitem.l_extendedprice, lineitem.l_discount, orders.o_orderdate, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } + └─StreamProject { exprs: [$expr1, Case((nation.n_name = 'IRAN':Varchar), $expr2, 0:Decimal) as $expr3, $expr2, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamProject { exprs: [Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [lineitem.l_extendedprice, lineitem.l_discount, orders.o_orderdate, nation.n_name, 
region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = customer.c_nationkey, output: [customer.c_custkey, region.r_regionkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } @@ -1693,15 +1694,15 @@ StreamGroupTopN { order: [$expr1 ASC], limit: 1, offset: 0, group_key: [$expr5] } { state table: 1 } └── StreamProject { exprs: [$expr1, (sum($expr3) / sum($expr2)) as $expr4, Vnode($expr1) as $expr5] } └── StreamHashAgg { group_key: [$expr1], aggs: [sum($expr3), sum($expr2), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 2 Fragment 2 - StreamProject { exprs: [$expr1, Case((nation.n_name = 'IRAN':Varchar), $expr2, 0:Decimal) as $expr3, $expr2, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └── StreamProject { exprs: [Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └── StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [lineitem.l_extendedprice, lineitem.l_discount, orders.o_orderdate, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } + StreamProject { exprs: [$expr1, Case((nation.n_name = 'IRAN':Varchar), $expr2, 0:Decimal) as 
$expr3, $expr2, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └── StreamProject { exprs: [Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └── StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [lineitem.l_extendedprice, lineitem.l_discount, orders.o_orderdate, nation.n_name, region.r_regionkey, nation.n_nationkey, customer.c_custkey, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├── left table: 3 ├── right table: 5 ├── left degree table: 4 @@ -1800,12 +1801,12 @@ Table 5 ├── columns: [ nation_n_name, lineitem_l_extendedprice, lineitem_l_discount, orders_o_custkey, orders_o_orderdate, nation_n_nationkey, supplier_s_suppkey, part_p_partkey, lineitem_l_orderkey, lineitem_l_linenumber, orders_o_orderkey ] - ├── primary key: [ $3 ASC, $5 ASC, $6 ASC, $7 ASC, $8 ASC, $9 ASC, $10 ASC ] + ├── primary key: [ $3 ASC, $5 ASC, $6 ASC, $7 ASC, $8 ASC, $9 ASC ] ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] ├── distribution key: [ 3 ] └── read pk prefix len hint: 1 - Table 6 { columns: [ orders_o_custkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey, lineitem_l_orderkey, lineitem_l_linenumber, orders_o_orderkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ], value indices: [ 7 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 6 { columns: [ orders_o_custkey, nation_n_nationkey, supplier_s_suppkey, part_p_partkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 
6 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 7 { columns: [ nation_n_nationkey, region_r_regionkey ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -1823,11 +1824,11 @@ Table 14 { columns: [ nation_n_regionkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 15 { columns: [ vnode, r_regionkey, region_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 15 { columns: [ vnode, r_regionkey, region_backfill_finished, region_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 16 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 16 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 17 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 17 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 18 { columns: [ nation_n_name, lineitem_l_orderkey, lineitem_l_extendedprice, lineitem_l_discount, nation_n_nationkey, supplier_s_suppkey, part_p_partkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $4 ASC, $5 ASC, $6 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 1 ], 
read pk prefix len hint: 1 } @@ -1853,9 +1854,9 @@ Table 29 { columns: [ supplier_s_nationkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 30 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 30 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 31 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 31 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 32 { columns: [ part_p_partkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -1865,13 +1866,13 @@ Table 35 { columns: [ lineitem_l_partkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 36 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 36 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 37 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], 
distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 37 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 38 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 38 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ o_year, mkt_share ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ o_year, mkt_share ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q9 before: @@ -1974,8 +1975,8 @@ └─StreamProject { exprs: [nation.n_name, $expr1, sum($expr2), Vnode(nation.n_name, $expr1) as $expr3] } └─StreamHashAgg { group_key: [nation.n_name, $expr1], aggs: [sum($expr2), count] } └─StreamExchange { dist: HashShard(nation.n_name, $expr1) } - └─StreamProject { exprs: [nation.n_name, Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, ((lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) - (partsupp.ps_supplycost * lineitem.l_quantity)) as $expr2, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } - └─StreamHashJoin { type: Inner, predicate: part.p_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = lineitem.l_suppkey AND partsupp.ps_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = supplier.s_suppkey, output: [lineitem.l_quantity, lineitem.l_extendedprice, 
lineitem.l_discount, partsupp.ps_supplycost, orders.o_orderdate, nation.n_name, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamProject { exprs: [nation.n_name, Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, ((lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) - (partsupp.ps_supplycost * lineitem.l_quantity)) as $expr2, part.p_partkey, partsupp.ps_suppkey, nation.n_nationkey, orders.o_orderkey, lineitem.l_linenumber, partsupp.ps_partkey] } + └─StreamHashJoin { type: Inner, predicate: part.p_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = lineitem.l_suppkey AND partsupp.ps_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = supplier.s_suppkey, output: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, partsupp.ps_supplycost, orders.o_orderdate, nation.n_name, part.p_partkey, partsupp.ps_suppkey, partsupp.ps_partkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: part.p_partkey = partsupp.ps_partkey, output: all } │ ├─StreamExchange { dist: HashShard(part.p_partkey) } @@ -1985,7 +1986,7 @@ │ └─StreamExchange { dist: HashShard(partsupp.ps_partkey) } │ └─StreamFilter { predicate: (partsupp.ps_suppkey = partsupp.ps_suppkey) } │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } - └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [nation.n_name, supplier.s_suppkey, orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, nation.n_nationkey, orders.o_orderkey, 
lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [nation.n_name, supplier.s_suppkey, orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, nation.n_nationkey, orders.o_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey, output: [nation.n_name, supplier.s_suppkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } @@ -2011,20 +2012,20 @@ StreamGroupTopN { order: [nation.n_name ASC, $expr1 DESC], limit: 1, offset: 0, group_key: [$expr3] } { state table: 1 } └── StreamProject { exprs: [nation.n_name, $expr1, sum($expr2), Vnode(nation.n_name, $expr1) as $expr3] } └── StreamHashAgg { group_key: [nation.n_name, $expr1], aggs: [sum($expr2), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0, 1]) from 2 Fragment 2 - StreamProject { exprs: [nation.n_name, Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, ((lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) - (partsupp.ps_supplycost * lineitem.l_quantity)) as $expr2, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } - └── StreamHashJoin { type: Inner, predicate: part.p_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = lineitem.l_suppkey AND partsupp.ps_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = supplier.s_suppkey, output: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, partsupp.ps_supplycost, orders.o_orderdate, nation.n_name, part.p_partkey, partsupp.ps_partkey, partsupp.ps_suppkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, 
lineitem.l_orderkey, lineitem.l_linenumber] } + StreamProject { exprs: [nation.n_name, Extract('YEAR':Varchar, orders.o_orderdate) as $expr1, ((lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) - (partsupp.ps_supplycost * lineitem.l_quantity)) as $expr2, part.p_partkey, partsupp.ps_suppkey, nation.n_nationkey, orders.o_orderkey, lineitem.l_linenumber, partsupp.ps_partkey] } + └── StreamHashJoin { type: Inner, predicate: part.p_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = lineitem.l_suppkey AND partsupp.ps_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = supplier.s_suppkey, output: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, partsupp.ps_supplycost, orders.o_orderdate, nation.n_name, part.p_partkey, partsupp.ps_suppkey, partsupp.ps_partkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_linenumber] } ├── left table: 3 ├── right table: 5 ├── left degree table: 4 ├── right degree table: 6 ├── StreamExchange Hash([2]) from 3 - └── StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [nation.n_name, supplier.s_suppkey, orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, nation.n_nationkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } { left table: 13, right table: 15, left degree table: 14, right degree table: 16 } + └── StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [nation.n_name, supplier.s_suppkey, orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, nation.n_nationkey, orders.o_orderkey, lineitem.l_linenumber] } { left table: 13, right table: 15, left degree table: 14, right degree table: 16 } ├── StreamExchange Hash([1]) from 6 └── StreamExchange Hash([2]) from 9 @@ -2087,9 +2088,9 @@ Table 4 { columns: [ part_p_partkey, partsupp_ps_suppkey, 
partsupp_ps_partkey, partsupp_ps_suppkey_0, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 3 ], read pk prefix len hint: 4 } - Table 5 { columns: [ nation_n_name, supplier_s_suppkey, orders_o_orderdate, lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_quantity, lineitem_l_extendedprice, lineitem_l_discount, nation_n_nationkey, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber ], primary key: [ $3 ASC, $4 ASC, $3 ASC, $1 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ], distribution key: [ 1 ], read pk prefix len hint: 4 } + Table 5 { columns: [ nation_n_name, supplier_s_suppkey, orders_o_orderdate, lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_quantity, lineitem_l_extendedprice, lineitem_l_discount, nation_n_nationkey, orders_o_orderkey, lineitem_l_linenumber ], primary key: [ $3 ASC, $4 ASC, $3 ASC, $1 ASC, $8 ASC, $9 ASC, $10 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ], distribution key: [ 1 ], read pk prefix len hint: 4 } - Table 6 { columns: [ lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_partkey_0, supplier_s_suppkey, nation_n_nationkey, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC, $7 ASC ], value indices: [ 8 ], distribution key: [ 3 ], read pk prefix len hint: 4 } + Table 6 { columns: [ lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_partkey_0, supplier_s_suppkey, nation_n_nationkey, orders_o_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ], value indices: [ 7 ], distribution key: [ 3 ], read pk prefix len hint: 4 } Table 7 { columns: [ part_p_partkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2099,17 +2100,17 @@ Table 10 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], 
primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 11 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 11 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 13 { columns: [ nation_n_name, supplier_s_suppkey, nation_n_nationkey ], primary key: [ $1 ASC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 14 { columns: [ supplier_s_suppkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 15 { columns: [ orders_o_orderdate, lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_quantity, lineitem_l_extendedprice, lineitem_l_discount, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber ], primary key: [ $2 ASC, $6 ASC, $7 ASC, $8 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ], distribution key: [ 2 ], read pk prefix len hint: 1 } + Table 15 { columns: [ orders_o_orderdate, lineitem_l_partkey, lineitem_l_suppkey, lineitem_l_quantity, lineitem_l_extendedprice, lineitem_l_discount, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber ], primary key: [ $2 ASC, $6 ASC, $8 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 
6, 7, 8 ], distribution key: [ 2 ], read pk prefix len hint: 1 } - Table 16 { columns: [ lineitem_l_suppkey, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 16 { columns: [ lineitem_l_suppkey, orders_o_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 17 { columns: [ nation_n_nationkey, nation_n_name ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2119,9 +2120,9 @@ Table 20 { columns: [ supplier_s_nationkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 21 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 21 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 22 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 22 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 23 { columns: [ orders_o_orderkey, orders_o_orderdate ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2131,11 +2132,11 @@ Table 26 { columns: [ lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary 
key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 27 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 27 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 28 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 28 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ nation, o_year, sum_profit ], primary key: [ $0 ASC, $1 DESC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ nation, o_year, sum_profit ], primary key: [ $0 ASC, $1 DESC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 2 } - id: tpch_q10 before: @@ -2229,8 +2230,8 @@ └─StreamGroupTopN { order: [sum($expr1) DESC], limit: 20, offset: 0, group_key: [$expr2] } └─StreamProject { exprs: [customer.c_custkey, customer.c_name, sum($expr1), customer.c_acctbal, nation.n_name, customer.c_address, customer.c_phone, customer.c_comment, Vnode(customer.c_custkey) as $expr2] } └─StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment], aggs: [sum($expr1), count] } - └─StreamProject { exprs: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, 
customer.c_comment, (lineitem.l_extendedprice * (1.00:Decimal - lineitem.l_discount)) as $expr1, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } + └─StreamProject { exprs: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment, (lineitem.l_extendedprice * (1.00:Decimal - lineitem.l_discount)) as $expr1, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = customer.c_nationkey, output: [nation.n_name, customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } @@ -2258,9 +2259,9 @@ Fragment 1 StreamGroupTopN { order: [sum($expr1) DESC], limit: 20, offset: 0, group_key: [$expr2] } { state table: 1 } └── StreamProject { exprs: [customer.c_custkey, customer.c_name, sum($expr1), customer.c_acctbal, nation.n_name, customer.c_address, customer.c_phone, customer.c_comment, Vnode(customer.c_custkey) as $expr2] } - └── StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, 
nation.n_name, customer.c_address, customer.c_comment], aggs: [sum($expr1), count] } { result table: 2, state tables: [], distinct tables: [] } - └── StreamProject { exprs: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment, (lineitem.l_extendedprice * (1.00:Decimal - lineitem.l_discount)) as $expr1, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } - └── StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } + └── StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment], aggs: [sum($expr1), count] } { intermediate state table: 2, state tables: [], distinct tables: [] } + └── StreamProject { exprs: [customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment, (lineitem.l_extendedprice * (1.00:Decimal - lineitem.l_discount)) as $expr1, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └── StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, lineitem.l_extendedprice, lineitem.l_discount, nation.n_name, nation.n_nationkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├── left table: 3 ├── right table: 5 ├── left degree table: 4 @@ -2302,7 +2303,12 @@ ├── Upstream └── BatchPlanNode - Table 0 { columns: [ customer_c_custkey, customer_c_name, sum($expr1), customer_c_acctbal, nation_n_name, customer_c_address, customer_c_phone, 
customer_c_comment, $expr2 ], primary key: [ $2 DESC, $0 ASC, $1 ASC, $3 ASC, $6 ASC, $4 ASC, $5 ASC, $7 ASC, $8 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ], distribution key: [], read pk prefix len hint: 0 } + Table 0 + ├── columns: [ customer_c_custkey, customer_c_name, sum($expr1), customer_c_acctbal, nation_n_name, customer_c_address, customer_c_phone, customer_c_comment, $expr2 ] + ├── primary key: [ $2 DESC, $0 ASC, $1 ASC, $3 ASC, $6 ASC, $4 ASC, $5 ASC, $7 ASC ] + ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ] + ├── distribution key: [] + └── read pk prefix len hint: 0 Table 1 ├── columns: [ customer_c_custkey, customer_c_name, sum($expr1), customer_c_acctbal, nation_n_name, customer_c_address, customer_c_phone, customer_c_comment, $expr2 ] @@ -2318,9 +2324,9 @@ Table 4 { columns: [ customer_c_custkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 { columns: [ lineitem_l_extendedprice, lineitem_l_discount, orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, orders_o_orderkey ], primary key: [ $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 2 ], read pk prefix len hint: 1 } + Table 5 { columns: [ lineitem_l_extendedprice, lineitem_l_discount, orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, orders_o_orderkey ], primary key: [ $2 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 2 ], read pk prefix len hint: 1 } - Table 6 { columns: [ orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, orders_o_orderkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 6 { columns: [ orders_o_custkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 
1 } Table 7 { columns: [ nation_n_nationkey, nation_n_name ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2330,9 +2336,9 @@ Table 10 { columns: [ customer_c_nationkey, customer_c_custkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 11 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 11 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 13 { columns: [ lineitem_l_orderkey, lineitem_l_extendedprice, lineitem_l_discount, lineitem_l_linenumber ], primary key: [ $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2342,11 +2348,11 @@ Table 16 { columns: [ orders_o_orderkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 17 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 17 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], 
distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 18 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 18 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ c_custkey, c_name, revenue, c_acctbal, n_name, c_address, c_phone, c_comment ], primary key: [ $2 DESC, $0 ASC, $1 ASC, $3 ASC, $6 ASC, $4 ASC, $5 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [], read pk prefix len hint: 7 } + Table 4294967294 { columns: [ c_custkey, c_name, revenue, c_acctbal, n_name, c_address, c_phone, c_comment ], primary key: [ $2 DESC, $0 ASC, $1 ASC, $3 ASC, $6 ASC, $4 ASC, $5 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [], read pk prefix len hint: 8 } - id: tpch_q11 before: @@ -2454,9 +2460,9 @@ ├─StreamProject { exprs: [partsupp.ps_partkey, sum($expr1)] } │ └─StreamHashAgg { group_key: [partsupp.ps_partkey], aggs: [sum($expr1), count] } │ └─StreamExchange { dist: HashShard(partsupp.ps_partkey) } - │ └─StreamProject { exprs: [partsupp.ps_partkey, (partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr1, partsupp.ps_suppkey, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + │ └─StreamProject { exprs: [partsupp.ps_partkey, (partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr1, partsupp.ps_suppkey, supplier.s_nationkey] } │ └─StreamShare { id: 9 } - │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_suppkey, supplier.s_nationkey, nation.n_nationkey] } + │ └─StreamHashJoin { type: Inner, 
predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_nationkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } @@ -2472,9 +2478,9 @@ └─StreamSimpleAgg { aggs: [sum(sum($expr2)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr2)] } - └─StreamProject { exprs: [(partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr2, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + └─StreamProject { exprs: [(partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr2, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_nationkey] } └─StreamShare { id: 9 } - └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_suppkey, supplier.s_nationkey, nation.n_nationkey] } + └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_nationkey, nation.n_nationkey] } ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } @@ -2501,18 +2507,18 @@ ├── right table: 3 ├── StreamProject { exprs: 
[partsupp.ps_partkey, sum($expr1)] } │ └── StreamHashAgg { group_key: [partsupp.ps_partkey], aggs: [sum($expr1), count] } - │ ├── result table: 4 + │ ├── intermediate state table: 4 │ ├── state tables: [] │ ├── distinct tables: [] │ └── StreamExchange Hash([0]) from 2 └── StreamExchange Broadcast from 8 Fragment 2 - StreamProject { exprs: [partsupp.ps_partkey, (partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr1, partsupp.ps_suppkey, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + StreamProject { exprs: [partsupp.ps_partkey, (partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr1, partsupp.ps_suppkey, supplier.s_nationkey] } └── StreamExchange NoShuffle from 3 Fragment 3 - StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_suppkey, supplier.s_nationkey, nation.n_nationkey] } + StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, partsupp.ps_suppkey, supplier.s_nationkey, nation.n_nationkey] } ├── left table: 5 ├── right table: 7 ├── left degree table: 6 @@ -2549,12 +2555,12 @@ Fragment 8 StreamProject { exprs: [(sum(sum($expr2)) * 0.0001000000:Decimal) as $expr3] } - └── StreamSimpleAgg { aggs: [sum(sum($expr2)), count] } { result table: 16, state tables: [], distinct tables: [] } + └── StreamSimpleAgg { aggs: [sum(sum($expr2)), count] } { intermediate state table: 16, state tables: [], distinct tables: [] } └── StreamExchange Single from 9 Fragment 9 StreamStatelessSimpleAgg { aggs: [sum($expr2)] } - └── StreamProject { exprs: [(partsupp.ps_supplycost * partsupp.ps_availqty::Decimal) as $expr2, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + └── StreamProject { exprs: [(partsupp.ps_supplycost * 
partsupp.ps_availqty::Decimal) as $expr2, partsupp.ps_partkey, partsupp.ps_suppkey, supplier.s_nationkey] } └── StreamExchange NoShuffle from 3 Table 0 { columns: [ partsupp_ps_partkey, sum($expr1), $expr4 ], primary key: [ $1 DESC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } @@ -2569,12 +2575,12 @@ Table 5 ├── columns: [ partsupp_ps_partkey, partsupp_ps_availqty, partsupp_ps_supplycost, supplier_s_nationkey, partsupp_ps_suppkey, supplier_s_suppkey ] - ├── primary key: [ $3 ASC, $0 ASC, $4 ASC, $5 ASC ] + ├── primary key: [ $3 ASC, $0 ASC, $4 ASC ] ├── value indices: [ 0, 1, 2, 3, 4, 5 ] ├── distribution key: [ 3 ] └── read pk prefix len hint: 1 - Table 6 { columns: [ supplier_s_nationkey, partsupp_ps_partkey, partsupp_ps_suppkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 6 { columns: [ supplier_s_nationkey, partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 7 { columns: [ nation_n_nationkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2588,15 +2594,15 @@ Table 12 { columns: [ supplier_s_suppkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 13 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 13 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 14 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], 
primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 14 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 15 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 16 { columns: [ sum(sum($expr2)), count ], primary key: [], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } - Table 4294967294 { columns: [ ps_partkey, value ], primary key: [ $1 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ ps_partkey, value ], primary key: [ $1 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q12 before: @@ -2670,7 +2676,7 @@ └─StreamProject { exprs: [lineitem.l_shipmode, sum($expr1), sum($expr2), Vnode(lineitem.l_shipmode) as $expr3] } └─StreamHashAgg { group_key: [lineitem.l_shipmode], aggs: [sum($expr1), sum($expr2), count] } └─StreamExchange { dist: HashShard(lineitem.l_shipmode) } - └─StreamProject { exprs: [lineitem.l_shipmode, Case(((orders.o_orderpriority = '1-URGENT':Varchar) OR (orders.o_orderpriority = '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr1, Case(((orders.o_orderpriority <> '1-URGENT':Varchar) AND (orders.o_orderpriority <> '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr2, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamProject { exprs: [lineitem.l_shipmode, Case(((orders.o_orderpriority = '1-URGENT':Varchar) 
OR (orders.o_orderpriority = '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr1, Case(((orders.o_orderpriority <> '1-URGENT':Varchar) AND (orders.o_orderpriority <> '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr2, orders.o_orderkey, lineitem.l_linenumber] } └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [orders.o_orderpriority, lineitem.l_shipmode, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(orders.o_orderkey) } │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority], pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } @@ -2690,13 +2696,13 @@ StreamGroupTopN { order: [lineitem.l_shipmode ASC], limit: 1, offset: 0, group_key: [$expr3] } { state table: 1 } └── StreamProject { exprs: [lineitem.l_shipmode, sum($expr1), sum($expr2), Vnode(lineitem.l_shipmode) as $expr3] } └── StreamHashAgg { group_key: [lineitem.l_shipmode], aggs: [sum($expr1), sum($expr2), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 2 Fragment 2 - StreamProject { exprs: [lineitem.l_shipmode, Case(((orders.o_orderpriority = '1-URGENT':Varchar) OR (orders.o_orderpriority = '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr1, Case(((orders.o_orderpriority <> '1-URGENT':Varchar) AND (orders.o_orderpriority <> '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr2, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } + StreamProject { exprs: [lineitem.l_shipmode, Case(((orders.o_orderpriority = '1-URGENT':Varchar) OR (orders.o_orderpriority = '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr1, Case(((orders.o_orderpriority <> '1-URGENT':Varchar) AND (orders.o_orderpriority <> '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr2, orders.o_orderkey, lineitem.l_linenumber] } └── StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: 
[orders.o_orderpriority, lineitem.l_shipmode, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } { left table: 3, right table: 5, left degree table: 4, right degree table: 6 } ├── StreamExchange Hash([0]) from 3 └── StreamExchange Hash([0]) from 4 @@ -2727,11 +2733,11 @@ Table 6 { columns: [ lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 7 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 7 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 8 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 8 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ l_shipmode, high_line_count, low_line_count ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ l_shipmode, high_line_count, low_line_count ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q13 before: @@ -2821,7 +2827,7 @@ ├── state table: 1 └── StreamProject { exprs: [count(orders.o_orderkey), count, Vnode(count(orders.o_orderkey)) as $expr1] } └── StreamHashAgg { group_key: [count(orders.o_orderkey)], aggs: [count] } - ├── result table: 2 + ├── intermediate state table: 2 
├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([1]) from 2 @@ -2829,7 +2835,7 @@ Fragment 2 StreamProject { exprs: [customer.c_custkey, count(orders.o_orderkey)] } └── StreamHashAgg { group_key: [customer.c_custkey], aggs: [count(orders.o_orderkey), count] } - ├── result table: 3 + ├── intermediate state table: 3 ├── state tables: [] ├── distinct tables: [] └── StreamHashJoin { type: LeftOuter, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, orders.o_orderkey] } @@ -2897,22 +2903,22 @@ └── read pk prefix len hint: 1 Table 8 - ├── columns: [ vnode, c_custkey, customer_backfill_finished ] + ├── columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 Table 9 - ├── columns: [ vnode, o_orderkey, orders_backfill_finished ] + ├── columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 { columns: [ c_count, custdist ], primary key: [ $1 DESC, $0 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ c_count, custdist ], primary key: [ $1 DESC, $0 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 2 } - id: tpch_q14 before: @@ -2963,7 +2969,7 @@ └─StreamSimpleAgg { aggs: [sum(sum($expr1)), sum(sum($expr2)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr1), sum($expr2)] } - └─StreamProject { exprs: [Case(Like(part.p_type, 'PROMO%':Varchar), (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)), 0:Decimal) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as 
$expr2, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey] } + └─StreamProject { exprs: [Case(Like(part.p_type, 'PROMO%':Varchar), (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)), 0:Decimal) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey] } └─StreamHashJoin { type: Inner, predicate: lineitem.l_partkey = part.p_partkey, output: [lineitem.l_extendedprice, lineitem.l_discount, part.p_type, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey, part.p_partkey] } ├─StreamExchange { dist: HashShard(lineitem.l_partkey) } │ └─StreamProject { exprs: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } @@ -2977,14 +2983,14 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [((100.00:Decimal * sum(sum($expr1))) / sum(sum($expr2))) as $expr3] } └── StreamSimpleAgg { aggs: [sum(sum($expr1)), sum(sum($expr2)), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Single from 1 Fragment 1 StreamStatelessSimpleAgg { aggs: [sum($expr1), sum($expr2)] } - └── StreamProject { exprs: [Case(Like(part.p_type, 'PROMO%':Varchar), (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)), 0:Decimal) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey] } + └── StreamProject { exprs: [Case(Like(part.p_type, 'PROMO%':Varchar), (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)), 0:Decimal) as $expr1, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr2, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey] } └── StreamHashJoin { type: Inner, predicate: lineitem.l_partkey = part.p_partkey, output: [lineitem.l_extendedprice, 
lineitem.l_discount, part.p_type, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey, part.p_partkey] } ├── left table: 1 ├── right table: 3 @@ -3016,9 +3022,9 @@ Table 4 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 5 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 6 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 6 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ promo_revenue ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -3093,13 +3099,13 @@ └─LogicalProject { exprs: [lineitem.l_suppkey, (lineitem.l_extendedprice * (1:Int32::Decimal - lineitem.l_discount)) as $expr2] } └─LogicalScan { table: lineitem, output_columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount], required_columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate], predicate: (lineitem.l_shipdate >= '1993-01-01':Date) AND (lineitem.l_shipdate < ('1993-01-01':Date + '3 mons':Interval)) } stream_plan: |- - StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue, lineitem.l_suppkey(hidden)], stream_key: [], pk_columns: [s_suppkey], 
pk_conflict: NoCheck } - └─StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } + StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue], stream_key: [], pk_columns: [s_suppkey], pk_conflict: NoCheck } + └─StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1)] } └─StreamTopN { order: [supplier.s_suppkey ASC], limit: 1, offset: 0 } └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [supplier.s_suppkey ASC], limit: 1, offset: 0, group_key: [$expr3] } - └─StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey, Vnode(sum($expr1)) as $expr3] } - └─StreamHashJoin { type: Inner, predicate: sum($expr1) = max(max(sum($expr1))), output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } + └─StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), Vnode(sum($expr1)) as $expr3] } + └─StreamHashJoin { type: Inner, predicate: sum($expr1) = max(max(sum($expr1))), output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1)] } ├─StreamExchange { dist: HashShard(sum($expr1)) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } @@ -3126,16 +3132,16 @@ └─StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 - 
StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue, lineitem.l_suppkey(hidden)], stream_key: [], pk_columns: [s_suppkey], pk_conflict: NoCheck } + StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue], stream_key: [], pk_columns: [s_suppkey], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } + └── StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1)] } └── StreamTopN { order: [supplier.s_suppkey ASC], limit: 1, offset: 0 } { state table: 0 } └── StreamExchange Single from 1 Fragment 1 StreamGroupTopN { order: [supplier.s_suppkey ASC], limit: 1, offset: 0, group_key: [$expr3] } { state table: 1 } - └── StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey, Vnode(sum($expr1)) as $expr3] } - └── StreamHashJoin { type: Inner, predicate: sum($expr1) = max(max(sum($expr1))), output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } + └── StreamProject { exprs: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), Vnode(sum($expr1)) as $expr3] } + └── StreamHashJoin { type: Inner, predicate: sum($expr1) = max(max(sum($expr1))), output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1)] } ├── left table: 2 ├── right table: 4 ├── left degree table: 3 @@ -3153,13 +3159,14 @@ └── StreamExchange NoShuffle from 4 Fragment 3 - Chain { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { state table: 10 } + Chain { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, 
supplier.s_address, supplier.s_phone], pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + ├── state table: 10 ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [lineitem.l_suppkey, sum($expr1)] } - └── StreamHashAgg { group_key: [lineitem.l_suppkey], aggs: [sum($expr1), count] } { result table: 11, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [lineitem.l_suppkey], aggs: [sum($expr1), count] } { intermediate state table: 11, state tables: [], distinct tables: [] } └── StreamExchange Hash([0]) from 5 Fragment 5 @@ -3172,27 +3179,21 @@ Fragment 6 StreamProject { exprs: [max(max(sum($expr1)))] } - └── StreamSimpleAgg { aggs: [max(max(sum($expr1))), count] } { result table: 14, state tables: [ 13 ], distinct tables: [] } + └── StreamSimpleAgg { aggs: [max(max(sum($expr1))), count] } { intermediate state table: 14, state tables: [ 13 ], distinct tables: [] } └── StreamExchange Single from 7 Fragment 7 - StreamHashAgg { group_key: [$expr2], aggs: [max(sum($expr1)), count] } { result table: 16, state tables: [ 15 ], distinct tables: [] } + StreamHashAgg { group_key: [$expr2], aggs: [max(sum($expr1)), count] } { intermediate state table: 16, state tables: [ 15 ], distinct tables: [] } └── StreamProject { exprs: [lineitem.l_suppkey, sum($expr1), Vnode(lineitem.l_suppkey) as $expr2] } └── StreamExchange NoShuffle from 4 - Table 0 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, sum($expr1), lineitem_l_suppkey, $expr3 ], primary key: [ $0 ASC, $6 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [], read pk prefix len hint: 0 } + Table 0 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, sum($expr1), $expr3 ], primary key: [ $0 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [], read pk prefix len hint: 0 } - Table 1 - ├── columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, 
supplier_s_phone, sum($expr1), lineitem_l_suppkey, $expr3 ] - ├── primary key: [ $6 ASC, $0 ASC, $5 ASC, $4 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6 ] - ├── distribution key: [ 4 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 6 + Table 1 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, sum($expr1), $expr3 ], primary key: [ $5 ASC, $0 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 4 ], read pk prefix len hint: 1, vnode column idx: 5 } - Table 2 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, sum($expr1), lineitem_l_suppkey ], primary key: [ $4 ASC, $0 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 4 ], read pk prefix len hint: 1 } + Table 2 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_phone, sum($expr1), lineitem_l_suppkey ], primary key: [ $4 ASC, $0 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 4 ], read pk prefix len hint: 1 } - Table 3 { columns: [ sum($expr1), supplier_s_suppkey, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 3 { columns: [ sum($expr1), supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 4 { columns: [ max(max(sum($expr1))) ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -3206,11 +3207,11 @@ Table 9 { columns: [ lineitem_l_suppkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 10 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 10 { columns: [ vnode, s_suppkey, 
supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 11 { columns: [ lineitem_l_suppkey, sum($expr1), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 12 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 13 { columns: [ max(sum($expr1)), $expr2 ], primary key: [ $0 DESC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } @@ -3220,7 +3221,7 @@ Table 16 { columns: [ $expr2, max(sum($expr1)), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ s_suppkey, s_name, s_address, s_phone, total_revenue, lineitem.l_suppkey ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ s_suppkey, s_name, s_address, s_phone, total_revenue ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q16 before: @@ -3300,7 +3301,7 @@ └─StreamProject { exprs: [part.p_brand, part.p_type, part.p_size, count(distinct partsupp.ps_suppkey), Vnode(part.p_brand, part.p_type, part.p_size) as $expr1] } └─StreamHashAgg { group_key: [part.p_brand, part.p_type, part.p_size], aggs: [count(distinct partsupp.ps_suppkey), count] } └─StreamExchange { dist: HashShard(part.p_brand, part.p_type, part.p_size) } - 
└─StreamHashJoin { type: LeftAnti, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: all } + └─StreamHashJoin { type: LeftAnti, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_suppkey, part.p_brand, part.p_type, part.p_size, partsupp.ps_partkey] } ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, part.p_brand, part.p_type, part.p_size, partsupp.ps_partkey, part.p_partkey] } │ ├─StreamExchange { dist: HashShard(partsupp.ps_partkey) } @@ -3326,13 +3327,13 @@ ├── state table: 1 └── StreamProject { exprs: [part.p_brand, part.p_type, part.p_size, count(distinct partsupp.ps_suppkey), Vnode(part.p_brand, part.p_type, part.p_size) as $expr1] } └── StreamHashAgg { group_key: [part.p_brand, part.p_type, part.p_size], aggs: [count(distinct partsupp.ps_suppkey), count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [ (distinct key: partsupp.ps_suppkey, table id: 3) ] └── StreamExchange Hash([1, 2, 3]) from 2 Fragment 2 - StreamHashJoin { type: LeftAnti, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: all } + StreamHashJoin { type: LeftAnti, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_suppkey, part.p_brand, part.p_type, part.p_size, partsupp.ps_partkey] } ├── left table: 4 ├── right table: 6 ├── left degree table: 5 @@ -3399,12 +3400,12 @@ Table 4 ├── columns: [ partsupp_ps_suppkey, part_p_brand, part_p_type, part_p_size, partsupp_ps_partkey, part_p_partkey ] - ├── primary key: [ $0 ASC, $4 ASC, $5 ASC ] + ├── primary key: [ $0 ASC, $4 ASC ] ├── value indices: [ 0, 1, 2, 3, 4, 5 ] ├── distribution key: [ 0 ] └── read pk prefix len hint: 1 - Table 5 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey, part_p_partkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk 
prefix len hint: 1 } + Table 5 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 6 { columns: [ supplier_s_suppkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -3418,13 +3419,25 @@ Table 11 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 12 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 12 + ├── columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3, 4 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 - Table 13 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 13 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 14 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 14 + ├── columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 - Table 4294967294 { columns: [ p_brand, p_type, p_size, supplier_cnt ], primary key: [ $3 DESC, $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 
], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ p_brand, p_type, p_size, supplier_cnt ], primary key: [ $3 DESC, $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 4 } - id: tpch_q17 before: @@ -3501,7 +3514,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(lineitem.l_extendedprice)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(lineitem.l_extendedprice)] } - └─StreamProject { exprs: [lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey, part.p_partkey] } + └─StreamProject { exprs: [lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey, part.p_partkey, part.p_partkey] } └─StreamFilter { predicate: (lineitem.l_quantity < $expr1) } └─StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey, output: all } ├─StreamExchange { dist: HashShard(part.p_partkey) } @@ -3530,19 +3543,19 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [(sum(sum(lineitem.l_extendedprice)) / 7.0:Decimal) as $expr2] } └── StreamSimpleAgg { aggs: [sum(sum(lineitem.l_extendedprice)), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Single from 1 Fragment 1 StreamStatelessSimpleAgg { aggs: [sum(lineitem.l_extendedprice)] } - └── StreamProject { exprs: [lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey, part.p_partkey] } + └── StreamProject { exprs: [lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey, part.p_partkey, part.p_partkey] } └── StreamFilter { predicate: (lineitem.l_quantity < $expr1) } └── StreamHashJoin { type: Inner, predicate: part.p_partkey IS NOT DISTINCT FROM part.p_partkey, output: all } { left table: 1, right table: 3, left degree table: 2, right degree table: 
4 } ├── StreamExchange Hash([2]) from 2 └── StreamProject { exprs: [part.p_partkey, (0.2:Decimal * (sum(lineitem.l_quantity) / count(lineitem.l_quantity)::Decimal)) as $expr1] } - └── StreamHashAgg { group_key: [part.p_partkey], aggs: [sum(lineitem.l_quantity), count(lineitem.l_quantity), count] } { result table: 11, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [part.p_partkey], aggs: [sum(lineitem.l_quantity), count(lineitem.l_quantity), count] } { intermediate state table: 11, state tables: [], distinct tables: [] } └── StreamHashJoin { type: LeftOuter, predicate: part.p_partkey IS NOT DISTINCT FROM lineitem.l_partkey, output: [part.p_partkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber] } ├── left table: 12 ├── right table: 14 @@ -3575,7 +3588,7 @@ Fragment 5 StreamProject { exprs: [part.p_partkey] } - └── StreamHashAgg { group_key: [part.p_partkey], aggs: [count] } { result table: 16, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [part.p_partkey], aggs: [count] } { intermediate state table: 16, state tables: [], distinct tables: [] } └── StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: (part.p_brand = 'Brand#13':Varchar) AND (part.p_container = 'JUMBO PKG':Varchar) } └── Chain { table: part, columns: [part.p_partkey, part.p_brand, part.p_container], pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { state table: 17 } @@ -3611,9 +3624,9 @@ Table 8 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 9 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 9 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 
], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 10 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 10 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 11 { columns: [ part_p_partkey, sum(lineitem_l_quantity), count(lineitem_l_quantity), count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -3627,9 +3640,9 @@ Table 16 { columns: [ part_p_partkey, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 17 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 17 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 18 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 18 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ avg_yearly ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -3729,7 +3742,7 @@ └─StreamGroupTopN { order: [orders.o_totalprice DESC, orders.o_orderdate ASC], limit: 100, offset: 0, group_key: [$expr1] } 
└─StreamProject { exprs: [customer.c_name, customer.c_custkey, orders.o_orderkey, orders.o_orderdate, orders.o_totalprice, sum(lineitem.l_quantity), Vnode(orders.o_orderkey) as $expr1] } └─StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate], aggs: [sum(lineitem.l_quantity), count] } - └─StreamHashJoin { type: LeftSemi, predicate: orders.o_orderkey = lineitem.l_orderkey, output: all } + └─StreamHashJoin { type: LeftSemi, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate, lineitem.l_quantity, lineitem.l_linenumber] } ├─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber] } │ ├─StreamExchange { dist: HashShard(orders.o_orderkey) } │ │ └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate] } @@ -3756,8 +3769,12 @@ Fragment 1 StreamGroupTopN { order: [orders.o_totalprice DESC, orders.o_orderdate ASC], limit: 100, offset: 0, group_key: [$expr1] } { state table: 1 } └── StreamProject { exprs: [customer.c_name, customer.c_custkey, orders.o_orderkey, orders.o_orderdate, orders.o_totalprice, sum(lineitem.l_quantity), Vnode(orders.o_orderkey) as $expr1] } - └── StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate], aggs: [sum(lineitem.l_quantity), count] } { result table: 2, state tables: [], distinct tables: [] } - └── StreamHashJoin { type: LeftSemi, predicate: orders.o_orderkey = lineitem.l_orderkey, output: all } { left table: 3, right table: 5, left degree table: 4, right degree table: 6 } + └── 
StreamHashAgg { group_key: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate], aggs: [sum(lineitem.l_quantity), count] } { intermediate state table: 2, state tables: [], distinct tables: [] } + └── StreamHashJoin { type: LeftSemi, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate, lineitem.l_quantity, lineitem.l_linenumber] } + ├── left table: 3 + ├── right table: 5 + ├── left degree table: 4 + ├── right degree table: 6 ├── StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber] } │ ├── left table: 7 │ ├── right table: 9 @@ -3768,7 +3785,7 @@ └── StreamProject { exprs: [lineitem.l_orderkey] } └── StreamFilter { predicate: (sum(lineitem.l_quantity) > 1:Decimal) } └── StreamProject { exprs: [lineitem.l_orderkey, sum(lineitem.l_quantity)] } - └── StreamHashAgg { group_key: [lineitem.l_orderkey], aggs: [sum(lineitem.l_quantity), count] } { result table: 18, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [lineitem.l_orderkey], aggs: [sum(lineitem.l_quantity), count] } { intermediate state table: 18, state tables: [], distinct tables: [] } └── StreamExchange Hash([0]) from 6 Fragment 2 @@ -3802,7 +3819,7 @@ Table 0 ├── columns: [ customer_c_name, customer_c_custkey, orders_o_orderkey, orders_o_orderdate, orders_o_totalprice, sum(lineitem_l_quantity), $expr1 ] - ├── primary key: [ $4 DESC, $3 ASC, $1 ASC, $0 ASC, $2 ASC, $6 ASC ] + ├── primary key: [ $4 DESC, $3 ASC, $1 ASC, $0 ASC, $2 ASC ] ├── value indices: [ 0, 1, 2, 3, 4, 5, 6 ] ├── distribution key: [] └── read pk prefix len hint: 0 @@ -3824,12 +3841,12 @@ Table 3 ├── columns: [ customer_c_custkey, customer_c_name, orders_o_orderkey, 
orders_o_totalprice, orders_o_orderdate, lineitem_l_quantity, lineitem_l_orderkey, lineitem_l_linenumber ] - ├── primary key: [ $2 ASC, $0 ASC, $6 ASC, $7 ASC ] + ├── primary key: [ $2 ASC, $0 ASC, $7 ASC ] ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ] ├── distribution key: [ 2 ] └── read pk prefix len hint: 1 - Table 4 { columns: [ orders_o_orderkey, customer_c_custkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 4 { columns: [ orders_o_orderkey, customer_c_custkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 5 { columns: [ lineitem_l_orderkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -3851,15 +3868,15 @@ Table 14 { columns: [ orders_o_custkey, orders_o_orderkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 15 { columns: [ vnode, c_custkey, customer_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 15 { columns: [ vnode, c_custkey, customer_backfill_finished, customer_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 16 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 16 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 17 { columns: [ vnode, l_orderkey, 
l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 17 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 18 { columns: [ lineitem_l_orderkey, sum(lineitem_l_quantity), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 19 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 19 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, quantity ], primary key: [ $4 DESC, $3 ASC, $1 ASC, $0 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [], read pk prefix len hint: 5 } @@ -3932,7 +3949,7 @@ └─StreamSimpleAgg { aggs: [sum(sum($expr1)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr1)] } - └─StreamProject { exprs: [(lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey] } + └─StreamProject { exprs: [(lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey] } └─StreamFilter { predicate: (((((((part.p_brand = 'Brand#52':Varchar) AND In(part.p_container, 'SM CASE':Varchar, 'SM BOX':Varchar, 'SM PACK':Varchar, 'SM PKG':Varchar)) AND (lineitem.l_quantity 
>= 1:Decimal)) AND (lineitem.l_quantity <= 11:Decimal)) AND (part.p_size <= 5:Int32)) OR (((((part.p_brand = 'Brand#24':Varchar) AND In(part.p_container, 'MED BAG':Varchar, 'MED BOX':Varchar, 'MED PKG':Varchar, 'MED PACK':Varchar)) AND (lineitem.l_quantity >= 30:Decimal)) AND (lineitem.l_quantity <= 40:Decimal)) AND (part.p_size <= 10:Int32))) OR (((((part.p_brand = 'Brand#32':Varchar) AND In(part.p_container, 'LG CASE':Varchar, 'LG BOX':Varchar, 'LG PACK':Varchar, 'LG PKG':Varchar)) AND (lineitem.l_quantity >= 10:Decimal)) AND (lineitem.l_quantity <= 20:Decimal)) AND (part.p_size <= 15:Int32))) } └─StreamHashJoin { type: Inner, predicate: lineitem.l_partkey = part.p_partkey, output: all } ├─StreamExchange { dist: HashShard(lineitem.l_partkey) } @@ -3948,14 +3965,14 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [sum(sum($expr1))] } └── StreamSimpleAgg { aggs: [sum(sum($expr1)), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Single from 1 Fragment 1 StreamStatelessSimpleAgg { aggs: [sum($expr1)] } - └── StreamProject { exprs: [(lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber, part.p_partkey, lineitem.l_partkey] } + └── StreamProject { exprs: [(lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey] } └── StreamFilter { predicate: (((((((part.p_brand = 'Brand#52':Varchar) AND In(part.p_container, 'SM CASE':Varchar, 'SM BOX':Varchar, 'SM PACK':Varchar, 'SM PKG':Varchar)) AND (lineitem.l_quantity >= 1:Decimal)) AND (lineitem.l_quantity <= 11:Decimal)) AND (part.p_size <= 5:Int32)) OR (((((part.p_brand = 'Brand#24':Varchar) AND In(part.p_container, 'MED BAG':Varchar, 'MED BOX':Varchar, 'MED PKG':Varchar, 'MED PACK':Varchar)) AND (lineitem.l_quantity >= 30:Decimal)) AND (lineitem.l_quantity <= 40:Decimal)) AND (part.p_size <= 
10:Int32))) OR (((((part.p_brand = 'Brand#32':Varchar) AND In(part.p_container, 'LG CASE':Varchar, 'LG BOX':Varchar, 'LG PACK':Varchar, 'LG PKG':Varchar)) AND (lineitem.l_quantity >= 10:Decimal)) AND (lineitem.l_quantity <= 20:Decimal)) AND (part.p_size <= 15:Int32))) } └── StreamHashJoin { type: Inner, predicate: lineitem.l_partkey = part.p_partkey, output: all } { left table: 1, right table: 3, left degree table: 2, right degree table: 4 } ├── StreamExchange Hash([0]) from 2 @@ -3984,9 +4001,9 @@ Table 4 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 5 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 6 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 6 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ revenue ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -4052,13 +4069,13 @@ └─BatchFilter { predicate: (lineitem.l_shipdate >= '1994-01-01':Date) AND (lineitem.l_shipdate < '1995-01-01 00:00:00':Timestamp) } └─BatchScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_shipdate], distribution: SomeShard } stream_plan: |- - StreamMaterialize { columns: [s_name, 
s_address, supplier.s_suppkey(hidden), nation.n_nationkey(hidden), supplier.s_nationkey(hidden)], stream_key: [], pk_columns: [s_name], pk_conflict: NoCheck } - └─StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + StreamMaterialize { columns: [s_name, s_address, supplier.s_suppkey(hidden), supplier.s_nationkey(hidden)], stream_key: [], pk_columns: [s_name], pk_conflict: NoCheck } + └─StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey] } └─StreamTopN { order: [supplier.s_name ASC], limit: 1, offset: 0 } └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [supplier.s_name ASC], limit: 1, offset: 0, group_key: [$expr3] } - └─StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey, Vnode(supplier.s_suppkey) as $expr3] } - └─StreamHashJoin { type: LeftSemi, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + └─StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey, Vnode(supplier.s_suppkey) as $expr3] } + └─StreamHashJoin { type: LeftSemi, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey] } ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: all } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } @@ -4068,7 +4085,7 @@ │ └─StreamFilter { predicate: (nation.n_name = 'KENYA':Varchar) } │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } - └─StreamHashJoin { 
type: LeftSemi, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, partsupp.ps_partkey, lineitem.l_partkey, lineitem.l_suppkey] } + └─StreamHashJoin { type: LeftSemi, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, partsupp.ps_partkey] } ├─StreamExchange { dist: HashShard(partsupp.ps_partkey) } │ └─StreamProject { exprs: [partsupp.ps_partkey, partsupp.ps_suppkey, lineitem.l_partkey, lineitem.l_suppkey] } │ └─StreamFilter { predicate: ($expr1 > $expr2) } @@ -4088,16 +4105,16 @@ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [s_name, s_address, supplier.s_suppkey(hidden), nation.n_nationkey(hidden), supplier.s_nationkey(hidden)], stream_key: [], pk_columns: [s_name], pk_conflict: NoCheck } + StreamMaterialize { columns: [s_name, s_address, supplier.s_suppkey(hidden), supplier.s_nationkey(hidden)], stream_key: [], pk_columns: [s_name], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + └── StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey] } └── StreamTopN { order: [supplier.s_name ASC], limit: 1, offset: 0 } { state table: 0 } └── StreamExchange Single from 1 Fragment 1 StreamGroupTopN { order: [supplier.s_name ASC], limit: 1, offset: 0, group_key: [$expr3] } { state table: 1 } - └── StreamProject { exprs: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey, Vnode(supplier.s_suppkey) as $expr3] } - └── StreamHashJoin { type: LeftSemi, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_name, supplier.s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey] } + └── StreamProject { exprs: 
[supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey, Vnode(supplier.s_suppkey) as $expr3] } + └── StreamHashJoin { type: LeftSemi, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [supplier.s_name, supplier.s_address, supplier.s_suppkey, supplier.s_nationkey] } ├── left table: 2 ├── right table: 4 ├── left degree table: 3 @@ -4124,7 +4141,7 @@ └── BatchPlanNode Fragment 5 - StreamHashJoin { type: LeftSemi, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, partsupp.ps_partkey, lineitem.l_partkey, lineitem.l_suppkey] } + StreamHashJoin { type: LeftSemi, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, partsupp.ps_partkey] } ├── left table: 12 ├── right table: 14 ├── left degree table: 13 @@ -4142,7 +4159,10 @@ ├── right degree table: 19 ├── StreamExchange Hash([0, 1]) from 7 └── StreamProject { exprs: [lineitem.l_partkey, lineitem.l_suppkey, (0.5:Decimal * sum(lineitem.l_quantity)) as $expr2] } - └── StreamHashAgg { group_key: [lineitem.l_partkey, lineitem.l_suppkey], aggs: [sum(lineitem.l_quantity), count] } { result table: 21, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [lineitem.l_partkey, lineitem.l_suppkey], aggs: [sum(lineitem.l_quantity), count] } + ├── intermediate state table: 21 + ├── state tables: [] + ├── distinct tables: [] └── StreamExchange Hash([0, 1]) from 8 Fragment 7 @@ -4167,23 +4187,17 @@ ├── Upstream └── BatchPlanNode - Table 0 { columns: [ supplier_s_name, supplier_s_address, supplier_s_suppkey, nation_n_nationkey, supplier_s_nationkey, $expr3 ], primary key: [ $0 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [], read pk prefix len hint: 0 } + Table 0 { columns: [ supplier_s_name, supplier_s_address, supplier_s_suppkey, supplier_s_nationkey, $expr3 ], primary key: [ $0 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [], read pk prefix len hint: 0 } - Table 1 - ├── columns: [ 
supplier_s_name, supplier_s_address, supplier_s_suppkey, nation_n_nationkey, supplier_s_nationkey, $expr3 ] - ├── primary key: [ $5 ASC, $0 ASC, $2 ASC, $3 ASC, $4 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5 ] - ├── distribution key: [ 2 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 5 + Table 1 { columns: [ supplier_s_name, supplier_s_address, supplier_s_suppkey, supplier_s_nationkey, $expr3 ], primary key: [ $4 ASC, $0 ASC, $2 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 2 ], read pk prefix len hint: 1, vnode column idx: 4 } - Table 2 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_nationkey, nation_n_nationkey ], primary key: [ $0 ASC, $4 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 2 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_nationkey, nation_n_nationkey ], primary key: [ $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 3 { columns: [ supplier_s_suppkey, nation_n_nationkey, supplier_s_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 3 { columns: [ supplier_s_suppkey, supplier_s_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 4 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey, lineitem_l_partkey, lineitem_l_suppkey ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 4 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey, lineitem_l_partkey, lineitem_l_suppkey, _degree 
], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 5 { columns: [ partsupp_ps_suppkey, partsupp_ps_partkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 6 { columns: [ supplier_s_suppkey, supplier_s_name, supplier_s_address, supplier_s_nationkey ], primary key: [ $3 ASC, $0 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 3 ], read pk prefix len hint: 1 } @@ -4193,13 +4207,13 @@ Table 9 { columns: [ nation_n_nationkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 10 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 10 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 11 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 11 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, lineitem_l_partkey, lineitem_l_suppkey ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 12 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, lineitem_l_partkey, lineitem_l_suppkey ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len 
hint: 1 } - Table 13 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, lineitem_l_partkey, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 13 { columns: [ partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 14 { columns: [ part_p_partkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -4213,15 +4227,15 @@ Table 19 { columns: [ lineitem_l_partkey, lineitem_l_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } - Table 20 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 20 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 21 { columns: [ lineitem_l_partkey, lineitem_l_suppkey, sum(lineitem_l_quantity), count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2, 3 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } - Table 22 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 22 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 23 { columns: [ vnode, p_partkey, part_backfill_finished ], primary key: [ $0 ASC ], 
value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 23 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { columns: [ s_name, s_address, supplier.s_suppkey, nation.n_nationkey, supplier.s_nationkey ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [], read pk prefix len hint: 0 } + Table 4294967294 { columns: [ s_name, s_address, supplier.s_suppkey, supplier.s_nationkey ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [], read pk prefix len hint: 1 } - id: tpch_q21 before: @@ -4337,7 +4351,7 @@ └─StreamProject { exprs: [supplier.s_name, count, Vnode(supplier.s_name) as $expr1] } └─StreamHashAgg { group_key: [supplier.s_name], aggs: [count] } └─StreamExchange { dist: HashShard(supplier.s_name) } - └─StreamHashJoin { type: LeftAnti, predicate: lineitem.l_orderkey = lineitem.l_orderkey AND (lineitem.l_suppkey <> lineitem.l_suppkey), output: [supplier.s_name, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } + └─StreamHashJoin { type: LeftAnti, predicate: lineitem.l_orderkey = lineitem.l_orderkey AND (lineitem.l_suppkey <> lineitem.l_suppkey), output: [supplier.s_name, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_linenumber, lineitem.l_orderkey] } ├─StreamHashJoin { type: LeftSemi, predicate: lineitem.l_orderkey = lineitem.l_orderkey AND (lineitem.l_suppkey <> lineitem.l_suppkey), output: all } │ ├─StreamExchange { dist: HashShard(lineitem.l_orderkey) } │ │ └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [supplier.s_name, lineitem.l_orderkey, lineitem.l_suppkey, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_linenumber] } @@ -4378,13 +4392,13 @@ 
├── state table: 1 └── StreamProject { exprs: [supplier.s_name, count, Vnode(supplier.s_name) as $expr1] } └── StreamHashAgg { group_key: [supplier.s_name], aggs: [count] } - ├── result table: 2 + ├── intermediate state table: 2 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 2 Fragment 2 - StreamHashJoin { type: LeftAnti, predicate: lineitem.l_orderkey = lineitem.l_orderkey AND (lineitem.l_suppkey <> lineitem.l_suppkey), output: [supplier.s_name, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } + StreamHashJoin { type: LeftAnti, predicate: lineitem.l_orderkey = lineitem.l_orderkey AND (lineitem.l_suppkey <> lineitem.l_suppkey), output: [supplier.s_name, nation.n_nationkey, supplier.s_suppkey, orders.o_orderkey, lineitem.l_linenumber, lineitem.l_orderkey] } ├── left table: 3 ├── right table: 5 ├── left degree table: 4 @@ -4453,7 +4467,7 @@ ├── Upstream └── BatchPlanNode - Table 0 { columns: [ supplier_s_name, count, $expr1 ], primary key: [ $1 DESC, $0 ASC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } + Table 0 { columns: [ supplier_s_name, count, $expr1 ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } Table 1 { columns: [ supplier_s_name, count, $expr1 ], primary key: [ $2 ASC, $1 DESC, $0 ASC ], value indices: [ 0, 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 2 } @@ -4489,9 +4503,9 @@ Table 12 { columns: [ supplier_s_suppkey, nation_n_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 13 { columns: [ lineitem_l_orderkey, lineitem_l_suppkey, orders_o_orderkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $2 ASC, $0 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 1 ], read pk prefix len hint: 1 } + Table 13 { columns: [ 
lineitem_l_orderkey, lineitem_l_suppkey, orders_o_orderkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $2 ASC, $3 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - Table 14 { columns: [ lineitem_l_suppkey, orders_o_orderkey, lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 14 { columns: [ lineitem_l_suppkey, orders_o_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 15 { columns: [ nation_n_nationkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -4501,9 +4515,9 @@ Table 18 { columns: [ supplier_s_nationkey, supplier_s_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 19 { columns: [ vnode, n_nationkey, nation_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 19 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 20 { columns: [ vnode, s_suppkey, supplier_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 20 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 21 { columns: [ orders_o_orderkey ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ 
-4513,15 +4527,15 @@ Table 24 { columns: [ lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 25 { columns: [ vnode, o_orderkey, orders_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 25 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 26 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 26 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 27 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 27 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 28 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 28 { columns: [ vnode, l_orderkey, l_linenumber, lineitem_backfill_finished, lineitem_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 { 
columns: [ s_name, numwait ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } + Table 4294967294 { columns: [ s_name, numwait ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 2 } - id: tpch_q22 before: diff --git a/src/frontend/planner_test/tests/testdata/output/tpch_variant.yaml b/src/frontend/planner_test/tests/testdata/output/tpch_variant.yaml index ebc0edb068a29..fdf928a0c9c84 100644 --- a/src/frontend/planner_test/tests/testdata/output/tpch_variant.yaml +++ b/src/frontend/planner_test/tests/testdata/output/tpch_variant.yaml @@ -242,215 +242,219 @@ └─BatchExchange { order: [], dist: HashShard(r_regionkey) } └─BatchSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id], filter: (None, None) } stream_plan: |- - StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id(hidden), _row_id#1(hidden), r_regionkey(hidden), _row_id#2(hidden), _row_id#3(hidden), _row_id#4(hidden), ps_suppkey(hidden), n_nationkey(hidden), ps_supplycost(hidden), p_partkey#1(hidden)], stream_key: [_row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, p_partkey, _row_id#4, ps_suppkey, n_nationkey, p_partkey#1, ps_supplycost], pk_columns: [s_acctbal, n_name, s_name, p_partkey, _row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, p_partkey#1, ps_supplycost], pk_conflict: NoCheck } - └─StreamHashJoin { type: Inner, predicate: p_partkey IS NOT DISTINCT FROM p_partkey AND ps_supplycost = min(ps_supplycost), output: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id, r_regionkey, _row_id, _row_id, _row_id, ps_suppkey, n_nationkey, ps_supplycost, p_partkey] } - ├─StreamExchange { dist: HashShard(p_partkey) } - │ └─StreamShare { id: 26 } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = 
s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, _row_id, ps_suppkey] } - │ ├─StreamExchange { dist: HashShard(n_nationkey) } - │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } - │ │ ├─StreamExchange { dist: HashShard(r_regionkey) } - │ │ │ └─StreamShare { id: 3 } - │ │ │ └─StreamProject { exprs: [r_regionkey, _row_id] } - │ │ │ └─StreamRowIdGen { row_id_index: 3 } - │ │ │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } - │ │ └─StreamExchange { dist: HashShard(n_regionkey) } - │ │ └─StreamShare { id: 7 } - │ │ └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } - │ │ └─StreamRowIdGen { row_id_index: 4 } - │ │ └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(s_nationkey) } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [p_partkey, p_mfgr, ps_supplycost, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id, _row_id, ps_suppkey, _row_id] } - │ ├─StreamExchange { dist: HashShard(ps_suppkey) } - │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, p_mfgr, ps_suppkey, ps_supplycost, _row_id, _row_id] } - │ │ ├─StreamExchange { dist: HashShard(p_partkey) } - │ │ │ └─StreamRowIdGen { row_id_index: 9 } - │ │ │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } - │ │ └─StreamExchange { dist: HashShard(ps_partkey) } - │ │ └─StreamShare { id: 15 } - │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } - │ │ └─StreamRowIdGen { row_id_index: 5 } - │ │ └─StreamSource { source: 
partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(s_suppkey) } - │ └─StreamShare { id: 21 } - │ └─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - │ └─StreamRowIdGen { row_id_index: 7 } - │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - └─StreamProject { exprs: [p_partkey, min(ps_supplycost)] } - └─StreamHashAgg { group_key: [p_partkey], aggs: [min(ps_supplycost), count] } - └─StreamHashJoin { type: LeftOuter, predicate: p_partkey IS NOT DISTINCT FROM ps_partkey, output: [p_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, _row_id, _row_id, r_regionkey, s_nationkey] } - ├─StreamAppendOnlyDedup { dedup_cols: [p_partkey] } - │ └─StreamExchange { dist: HashShard(p_partkey) } - │ └─StreamProject { exprs: [p_partkey] } - │ └─StreamShare { id: 26 } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, _row_id, ps_suppkey] } - │ ├─StreamExchange { dist: HashShard(n_nationkey) } - │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } - │ │ ├─StreamExchange { dist: HashShard(r_regionkey) } - │ │ │ └─StreamShare { id: 3 } - │ │ │ └─StreamProject { exprs: [r_regionkey, _row_id] } - │ │ │ └─StreamRowIdGen { row_id_index: 3 } - │ │ │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } - │ │ └─StreamExchange { dist: HashShard(n_regionkey) } - │ │ └─StreamShare { id: 7 } - │ │ └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } - │ │ └─StreamRowIdGen { row_id_index: 4 } - │ │ └─StreamSource { source: nation, 
columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(s_nationkey) } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [p_partkey, p_mfgr, ps_supplycost, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id, _row_id, ps_suppkey, _row_id] } - │ ├─StreamExchange { dist: HashShard(ps_suppkey) } - │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, p_mfgr, ps_suppkey, ps_supplycost, _row_id, _row_id] } - │ │ ├─StreamExchange { dist: HashShard(p_partkey) } - │ │ │ └─StreamRowIdGen { row_id_index: 9 } - │ │ │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } - │ │ └─StreamExchange { dist: HashShard(ps_partkey) } - │ │ └─StreamShare { id: 15 } - │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } - │ │ └─StreamRowIdGen { row_id_index: 5 } - │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(s_suppkey) } - │ └─StreamShare { id: 21 } - │ └─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - │ └─StreamRowIdGen { row_id_index: 7 } - │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - └─StreamExchange { dist: HashShard(ps_partkey) } - └─StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [ps_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, s_nationkey, _row_id, _row_id, r_regionkey] } - ├─StreamExchange { dist: HashShard(s_nationkey) } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [ps_partkey, ps_supplycost, s_nationkey, _row_id, ps_suppkey, _row_id] } - │ 
├─StreamExchange { dist: HashShard(ps_suppkey) } - │ │ └─StreamFilter { predicate: IsNotNull(ps_partkey) } - │ │ └─StreamShare { id: 15 } - │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } - │ │ └─StreamRowIdGen { row_id_index: 5 } - │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(s_suppkey) } - │ └─StreamShare { id: 21 } - │ └─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - │ └─StreamRowIdGen { row_id_index: 7 } - │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - └─StreamExchange { dist: HashShard(n_nationkey) } - └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, _row_id, r_regionkey, _row_id] } - ├─StreamExchange { dist: HashShard(r_regionkey) } - │ └─StreamShare { id: 3 } - │ └─StreamProject { exprs: [r_regionkey, _row_id] } - │ └─StreamRowIdGen { row_id_index: 3 } - │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } - └─StreamExchange { dist: HashShard(n_regionkey) } - └─StreamShare { id: 7 } - └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } - └─StreamRowIdGen { row_id_index: 4 } - └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } + StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id(hidden), _row_id#1(hidden), r_regionkey(hidden), _row_id#2(hidden), _row_id#3(hidden), _row_id#4(hidden), ps_suppkey(hidden), n_nationkey(hidden), ps_supplycost(hidden), p_partkey#1(hidden)], stream_key: [_row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, p_partkey, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost], pk_columns: [s_acctbal, n_name, s_name, p_partkey, 
_row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(p_partkey, _row_id, _row_id, r_regionkey, _row_id, _row_id, _row_id, ps_suppkey, n_nationkey, ps_supplycost) } + └─StreamHashJoin { type: Inner, predicate: p_partkey IS NOT DISTINCT FROM p_partkey AND ps_supplycost = min(ps_supplycost), output: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id, r_regionkey, _row_id, _row_id, _row_id, ps_suppkey, n_nationkey, ps_supplycost, p_partkey] } + ├─StreamExchange { dist: HashShard(p_partkey) } + │ └─StreamShare { id: 26 } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, _row_id, ps_suppkey] } + │ ├─StreamExchange { dist: HashShard(n_nationkey) } + │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } + │ │ ├─StreamExchange { dist: HashShard(r_regionkey) } + │ │ │ └─StreamShare { id: 3 } + │ │ │ └─StreamProject { exprs: [r_regionkey, _row_id] } + │ │ │ └─StreamRowIdGen { row_id_index: 3 } + │ │ │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } + │ │ └─StreamExchange { dist: HashShard(n_regionkey) } + │ │ └─StreamShare { id: 7 } + │ │ └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } + │ │ └─StreamRowIdGen { row_id_index: 4 } + │ │ └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(s_nationkey) } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [p_partkey, p_mfgr, ps_supplycost, s_name, s_address, s_nationkey, s_phone, s_acctbal, 
s_comment, _row_id, _row_id, ps_suppkey, _row_id] } + │ ├─StreamExchange { dist: HashShard(ps_suppkey) } + │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, p_mfgr, ps_suppkey, ps_supplycost, _row_id, _row_id] } + │ │ ├─StreamExchange { dist: HashShard(p_partkey) } + │ │ │ └─StreamRowIdGen { row_id_index: 9 } + │ │ │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } + │ │ └─StreamExchange { dist: HashShard(ps_partkey) } + │ │ └─StreamShare { id: 15 } + │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } + │ │ └─StreamRowIdGen { row_id_index: 5 } + │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(s_suppkey) } + │ └─StreamShare { id: 21 } + │ └─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + │ └─StreamRowIdGen { row_id_index: 7 } + │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + └─StreamProject { exprs: [p_partkey, min(ps_supplycost)] } + └─StreamHashAgg { group_key: [p_partkey], aggs: [min(ps_supplycost), count] } + └─StreamHashJoin { type: LeftOuter, predicate: p_partkey IS NOT DISTINCT FROM ps_partkey, output: [p_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, _row_id, _row_id, r_regionkey, s_nationkey] } + ├─StreamAppendOnlyDedup { dedup_cols: [p_partkey] } + │ └─StreamExchange { dist: HashShard(p_partkey) } + │ └─StreamProject { exprs: [p_partkey] } + │ └─StreamShare { id: 26 } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, 
_row_id, ps_suppkey] } + │ ├─StreamExchange { dist: HashShard(n_nationkey) } + │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } + │ │ ├─StreamExchange { dist: HashShard(r_regionkey) } + │ │ │ └─StreamShare { id: 3 } + │ │ │ └─StreamProject { exprs: [r_regionkey, _row_id] } + │ │ │ └─StreamRowIdGen { row_id_index: 3 } + │ │ │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } + │ │ └─StreamExchange { dist: HashShard(n_regionkey) } + │ │ └─StreamShare { id: 7 } + │ │ └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } + │ │ └─StreamRowIdGen { row_id_index: 4 } + │ │ └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(s_nationkey) } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [p_partkey, p_mfgr, ps_supplycost, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id, _row_id, ps_suppkey, _row_id] } + │ ├─StreamExchange { dist: HashShard(ps_suppkey) } + │ │ └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, p_mfgr, ps_suppkey, ps_supplycost, _row_id, _row_id] } + │ │ ├─StreamExchange { dist: HashShard(p_partkey) } + │ │ │ └─StreamRowIdGen { row_id_index: 9 } + │ │ │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } + │ │ └─StreamExchange { dist: HashShard(ps_partkey) } + │ │ └─StreamShare { id: 15 } + │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } + │ │ └─StreamRowIdGen { row_id_index: 5 } + │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(s_suppkey) } + │ └─StreamShare { id: 21 } + │ 
└─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + │ └─StreamRowIdGen { row_id_index: 7 } + │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + └─StreamExchange { dist: HashShard(ps_partkey) } + └─StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [ps_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, s_nationkey, _row_id, _row_id, r_regionkey] } + ├─StreamExchange { dist: HashShard(s_nationkey) } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [ps_partkey, ps_supplycost, s_nationkey, _row_id, ps_suppkey, _row_id] } + │ ├─StreamExchange { dist: HashShard(ps_suppkey) } + │ │ └─StreamFilter { predicate: IsNotNull(ps_partkey) } + │ │ └─StreamShare { id: 15 } + │ │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } + │ │ └─StreamRowIdGen { row_id_index: 5 } + │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(s_suppkey) } + │ └─StreamShare { id: 21 } + │ └─StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + │ └─StreamRowIdGen { row_id_index: 7 } + │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + └─StreamExchange { dist: HashShard(n_nationkey) } + └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, _row_id, r_regionkey, _row_id] } + ├─StreamExchange { dist: HashShard(r_regionkey) } + │ └─StreamShare { id: 3 } + │ └─StreamProject { exprs: [r_regionkey, _row_id] } + │ └─StreamRowIdGen { row_id_index: 3 } + │ └─StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } + └─StreamExchange { 
dist: HashShard(n_regionkey) } + └─StreamShare { id: 7 } + └─StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } + └─StreamRowIdGen { row_id_index: 4 } + └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id(hidden), _row_id#1(hidden), r_regionkey(hidden), _row_id#2(hidden), _row_id#3(hidden), _row_id#4(hidden), ps_suppkey(hidden), n_nationkey(hidden), ps_supplycost(hidden), p_partkey#1(hidden)], stream_key: [_row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, p_partkey, _row_id#4, ps_suppkey, n_nationkey, p_partkey#1, ps_supplycost], pk_columns: [s_acctbal, n_name, s_name, p_partkey, _row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, p_partkey#1, ps_supplycost], pk_conflict: NoCheck } + StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id(hidden), _row_id#1(hidden), r_regionkey(hidden), _row_id#2(hidden), _row_id#3(hidden), _row_id#4(hidden), ps_suppkey(hidden), n_nationkey(hidden), ps_supplycost(hidden), p_partkey#1(hidden)], stream_key: [_row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, p_partkey, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost], pk_columns: [s_acctbal, n_name, s_name, p_partkey, _row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamHashJoin { type: Inner, predicate: p_partkey IS NOT DISTINCT FROM p_partkey AND ps_supplycost = min(ps_supplycost), output: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id, r_regionkey, _row_id, _row_id, _row_id, ps_suppkey, n_nationkey, ps_supplycost, p_partkey] } { left table: 0, right table: 2, left degree table: 1, right degree 
table: 3 } - ├── StreamExchange Hash([0]) from 1 - └── StreamProject { exprs: [p_partkey, min(ps_supplycost)] } - └── StreamHashAgg { group_key: [p_partkey], aggs: [min(ps_supplycost), count] } { result table: 26, state tables: [ 25 ], distinct tables: [] } - └── StreamHashJoin { type: LeftOuter, predicate: p_partkey IS NOT DISTINCT FROM ps_partkey, output: [p_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, _row_id, _row_id, r_regionkey, s_nationkey] } { left table: 27, right table: 29, left degree table: 28, right degree table: 30 } - ├── StreamAppendOnlyDedup { dedup_cols: [p_partkey] } { state table: 31 } - │ └── StreamExchange Hash([0]) from 15 - └── StreamExchange Hash([0]) from 16 + └── StreamExchange Hash([3, 8, 9, 10, 11, 12, 13, 14, 15, 16]) from 1 Fragment 1 - StreamNoOp - └── StreamExchange NoShuffle from 2 + StreamHashJoin { type: Inner, predicate: p_partkey IS NOT DISTINCT FROM p_partkey AND ps_supplycost = min(ps_supplycost), output: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id, r_regionkey, _row_id, _row_id, _row_id, ps_suppkey, n_nationkey, ps_supplycost, p_partkey] } { left table: 0, right table: 2, left degree table: 1, right degree table: 3 } + ├── StreamExchange Hash([0]) from 2 + └── StreamProject { exprs: [p_partkey, min(ps_supplycost)] } + └── StreamHashAgg { group_key: [p_partkey], aggs: [min(ps_supplycost), count] } { intermediate state table: 26, state tables: [ 25 ], distinct tables: [] } + └── StreamHashJoin { type: LeftOuter, predicate: p_partkey IS NOT DISTINCT FROM ps_partkey, output: [p_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, _row_id, _row_id, r_regionkey, s_nationkey] } { left table: 27, right table: 29, left degree table: 28, right degree table: 30 } + ├── StreamAppendOnlyDedup { dedup_cols: [p_partkey] } { state table: 31 } + │ └── StreamExchange Hash([0]) from 16 + └── StreamExchange Hash([0]) from 17 Fragment 2 - StreamHashJoin [append_only] { type: Inner, 
predicate: n_nationkey = s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, _row_id, ps_suppkey] } { left table: 4, right table: 6, left degree table: 5, right degree table: 7 } - ├── StreamExchange Hash([0]) from 3 - └── StreamExchange Hash([5]) from 8 + StreamNoOp + └── StreamExchange NoShuffle from 3 Fragment 3 - StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } { left table: 8, right table: 10, left degree table: 9, right degree table: 11 } + StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey, output: [p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, _row_id, ps_suppkey] } { left table: 4, right table: 6, left degree table: 5, right degree table: 7 } ├── StreamExchange Hash([0]) from 4 - └── StreamExchange Hash([2]) from 6 + └── StreamExchange Hash([5]) from 9 Fragment 4 - StreamNoOp - └── StreamExchange NoShuffle from 5 + StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } { left table: 8, right table: 10, left degree table: 9, right degree table: 11 } + ├── StreamExchange Hash([0]) from 5 + └── StreamExchange Hash([2]) from 7 Fragment 5 + StreamNoOp + └── StreamExchange NoShuffle from 6 + + Fragment 6 StreamProject { exprs: [r_regionkey, _row_id] } └── StreamRowIdGen { row_id_index: 3 } └── StreamSource { source: region, columns: [r_regionkey, r_name, r_comment, _row_id] } { source state table: 12 } - Fragment 6 + Fragment 7 StreamNoOp - └── StreamExchange NoShuffle from 7 + └── StreamExchange NoShuffle from 8 - Fragment 7 + Fragment 8 StreamProject { exprs: [n_nationkey, n_name, n_regionkey, _row_id] } └── 
StreamRowIdGen { row_id_index: 4 } └── StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } { source state table: 13 } - Fragment 8 + Fragment 9 StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [p_partkey, p_mfgr, ps_supplycost, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id, _row_id, ps_suppkey, _row_id] } { left table: 14, right table: 16, left degree table: 15, right degree table: 17 } - ├── StreamExchange Hash([2]) from 9 - └── StreamExchange Hash([0]) from 13 + ├── StreamExchange Hash([2]) from 10 + └── StreamExchange Hash([0]) from 14 - Fragment 9 + Fragment 10 StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, p_mfgr, ps_suppkey, ps_supplycost, _row_id, _row_id] } { left table: 18, right table: 20, left degree table: 19, right degree table: 21 } - ├── StreamExchange Hash([0]) from 10 - └── StreamExchange Hash([0]) from 11 + ├── StreamExchange Hash([0]) from 11 + └── StreamExchange Hash([0]) from 12 - Fragment 10 + Fragment 11 StreamRowIdGen { row_id_index: 9 } └── StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } { source state table: 22 } - Fragment 11 + Fragment 12 StreamNoOp - └── StreamExchange NoShuffle from 12 + └── StreamExchange NoShuffle from 13 - Fragment 12 + Fragment 13 StreamProject { exprs: [ps_partkey, ps_suppkey, ps_supplycost, _row_id] } └── StreamRowIdGen { row_id_index: 5 } └── StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } { source state table: 23 } - Fragment 13 + Fragment 14 StreamNoOp - └── StreamExchange NoShuffle from 14 + └── StreamExchange NoShuffle from 15 - Fragment 14 + Fragment 15 StreamProject { exprs: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } └── StreamRowIdGen { 
row_id_index: 7 } └── StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } { source state table: 24 } - Fragment 15 + Fragment 16 StreamProject { exprs: [p_partkey] } - └── StreamExchange NoShuffle from 2 + └── StreamExchange NoShuffle from 3 - Fragment 16 + Fragment 17 StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [ps_partkey, ps_supplycost, _row_id, _row_id, ps_suppkey, s_nationkey, _row_id, _row_id, r_regionkey] } { left table: 32, right table: 34, left degree table: 33, right degree table: 35 } - ├── StreamExchange Hash([2]) from 17 - └── StreamExchange Hash([0]) from 20 + ├── StreamExchange Hash([2]) from 18 + └── StreamExchange Hash([0]) from 21 - Fragment 17 + Fragment 18 StreamHashJoin [append_only] { type: Inner, predicate: ps_suppkey = s_suppkey, output: [ps_partkey, ps_supplycost, s_nationkey, _row_id, ps_suppkey, _row_id] } { left table: 36, right table: 38, left degree table: 37, right degree table: 39 } - ├── StreamExchange Hash([1]) from 18 - └── StreamExchange Hash([0]) from 19 + ├── StreamExchange Hash([1]) from 19 + └── StreamExchange Hash([0]) from 20 - Fragment 18 + Fragment 19 StreamFilter { predicate: IsNotNull(ps_partkey) } - └── StreamExchange NoShuffle from 12 + └── StreamExchange NoShuffle from 13 - Fragment 19 + Fragment 20 StreamNoOp - └── StreamExchange NoShuffle from 14 + └── StreamExchange NoShuffle from 15 - Fragment 20 + Fragment 21 StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, _row_id, r_regionkey, _row_id] } { left table: 40, right table: 42, left degree table: 41, right degree table: 43 } - ├── StreamExchange Hash([0]) from 21 - └── StreamExchange Hash([2]) from 22 + ├── StreamExchange Hash([0]) from 22 + └── StreamExchange Hash([2]) from 23 - Fragment 21 + Fragment 22 StreamNoOp - └── StreamExchange NoShuffle from 5 + └── StreamExchange NoShuffle from 6 - 
Fragment 22 + Fragment 23 StreamNoOp - └── StreamExchange NoShuffle from 7 + └── StreamExchange NoShuffle from 8 Table 0 { columns: [ p_partkey, p_mfgr, s_name, s_address, s_phone, s_acctbal, s_comment, ps_supplycost, n_name, _row_id, _row_id_0, r_regionkey, n_nationkey, _row_id_1, _row_id_2, _row_id_3, ps_suppkey ], primary key: [ $0 ASC, $7 ASC, $9 ASC, $10 ASC, $11 ASC, $13 ASC, $14 ASC, $15 ASC, $16 ASC, $12 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ], distribution key: [ 0 ], read pk prefix len hint: 2 } @@ -540,7 +544,7 @@ Table 43 { columns: [ n_regionkey, _row_id, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 4294967294 { columns: [ s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost, p_partkey#1 ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $12 ASC, $13 ASC, $14 ASC, $15 ASC, $17 ASC, $16 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ], distribution key: [ 3 ], read pk prefix len hint: 11 } + Table 4294967294 { columns: [ s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, _row_id, _row_id#1, r_regionkey, _row_id#2, _row_id#3, _row_id#4, ps_suppkey, n_nationkey, ps_supplycost, p_partkey#1 ], primary key: [ $0 DESC, $2 ASC, $1 ASC, $3 ASC, $8 ASC, $9 ASC, $10 ASC, $11 ASC, $12 ASC, $13 ASC, $14 ASC, $15 ASC, $16 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ], distribution key: [ 3, 8, 9, 10, 11, 12, 13, 14, 15, 16 ], read pk prefix len hint: 13 } - id: tpch_q5 before: @@ -629,7 +633,7 @@ └─StreamProject { exprs: [n_name, sum($expr1)] } └─StreamHashAgg [append_only] { group_key: [n_name], aggs: [sum($expr1), count] } └─StreamExchange { dist: HashShard(n_name) } - 
└─StreamProject { exprs: [n_name, (l_extendedprice * (1:Decimal - l_discount)) as $expr1, _row_id, _row_id, r_regionkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, c_nationkey, n_nationkey] } + └─StreamProject { exprs: [n_name, (l_extendedprice * (1:Decimal - l_discount)) as $expr1, _row_id, _row_id, r_regionkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, n_nationkey] } └─StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey AND n_nationkey = c_nationkey, output: [l_extendedprice, l_discount, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, c_nationkey] } ├─StreamExchange { dist: HashShard(n_nationkey, n_nationkey) } │ └─StreamHashJoin [append_only] { type: Inner, predicate: r_regionkey = n_regionkey, output: [n_nationkey, n_name, _row_id, r_regionkey, _row_id] } @@ -664,13 +668,13 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [n_name, sum($expr1)] } └── StreamHashAgg [append_only] { group_key: [n_name], aggs: [sum($expr1), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamProject { exprs: [n_name, (l_extendedprice * (1:Decimal - l_discount)) as $expr1, _row_id, _row_id, r_regionkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, c_nationkey, n_nationkey] } + StreamProject { exprs: [n_name, (l_extendedprice * (1:Decimal - l_discount)) as $expr1, _row_id, _row_id, r_regionkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, n_nationkey] } └── StreamHashJoin [append_only] { type: Inner, predicate: n_nationkey = s_nationkey AND n_nationkey = c_nationkey, output: [l_extendedprice, l_discount, n_name, _row_id, _row_id, r_regionkey, n_nationkey, _row_id, _row_id, o_custkey, _row_id, _row_id, l_suppkey, o_orderkey, c_nationkey] } ├── left table: 
1 ├── right table: 3 @@ -797,7 +801,7 @@ Table 26 { columns: [ partition_id, offset_info ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - Table 4294967294 { columns: [ n_name, revenue ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 4294967294 { columns: [ n_name, revenue ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - id: tpch_q7 before: @@ -937,7 +941,7 @@ StreamMaterialize { columns: [supp_nation, cust_nation, l_year, revenue], stream_key: [supp_nation, cust_nation, l_year], pk_columns: [supp_nation, cust_nation, l_year], pk_conflict: NoCheck } ├── materialized table: 4294967294 └── StreamProject { exprs: [n_name, n_name, $expr1, sum($expr2)] } - └── StreamHashAgg [append_only] { group_key: [n_name, n_name, $expr1], aggs: [sum($expr2), count] } { result table: 0, state tables: [], distinct tables: [] } + └── StreamHashAgg [append_only] { group_key: [n_name, n_name, $expr1], aggs: [sum($expr2), count] } { intermediate state table: 0, state tables: [], distinct tables: [] } └── StreamExchange Hash([0, 1, 2]) from 1 Fragment 1 @@ -1243,7 +1247,7 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [$expr1, RoundDigit((sum($expr3) / sum($expr2)), 6:Int32) as $expr4] } └── StreamHashAgg [append_only] { group_key: [$expr1], aggs: [sum($expr3), sum($expr2), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0]) from 1 @@ -1524,7 +1528,7 @@ └─StreamProject { exprs: [n_name, $expr1, RoundDigit(sum($expr2), 2:Int32) as $expr3] } └─StreamHashAgg [append_only] { group_key: [n_name, $expr1], aggs: [sum($expr2), count] } └─StreamExchange { dist: HashShard(n_name, $expr1) } - └─StreamProject { exprs: [n_name, Extract('YEAR':Varchar, o_orderdate) as $expr1, ((l_extendedprice * 
(1:Decimal - l_discount)) - (ps_supplycost * l_quantity)) as $expr2, _row_id, _row_id, p_partkey, _row_id, _row_id, n_nationkey, _row_id, _row_id, o_orderkey, s_suppkey, ps_suppkey, ps_partkey] } + └─StreamProject { exprs: [n_name, Extract('YEAR':Varchar, o_orderdate) as $expr1, ((l_extendedprice * (1:Decimal - l_discount)) - (ps_supplycost * l_quantity)) as $expr2, _row_id, _row_id, p_partkey, _row_id, _row_id, n_nationkey, _row_id, _row_id, o_orderkey, ps_suppkey, ps_partkey] } └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = l_partkey AND ps_suppkey = l_suppkey AND ps_partkey = l_partkey AND ps_suppkey = s_suppkey, output: [l_quantity, l_extendedprice, l_discount, ps_supplycost, o_orderdate, n_name, _row_id, _row_id, p_partkey, ps_suppkey, ps_partkey, _row_id, _row_id, n_nationkey, _row_id, _row_id, o_orderkey, s_suppkey] } ├─StreamExchange { dist: HashShard(ps_suppkey) } │ └─StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = ps_partkey, output: [p_partkey, ps_partkey, ps_suppkey, ps_supplycost, _row_id, _row_id] } @@ -1559,13 +1563,13 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [n_name, $expr1, RoundDigit(sum($expr2), 2:Int32) as $expr3] } └── StreamHashAgg [append_only] { group_key: [n_name, $expr1], aggs: [sum($expr2), count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0, 1]) from 1 Fragment 1 - StreamProject { exprs: [n_name, Extract('YEAR':Varchar, o_orderdate) as $expr1, ((l_extendedprice * (1:Decimal - l_discount)) - (ps_supplycost * l_quantity)) as $expr2, _row_id, _row_id, p_partkey, _row_id, _row_id, n_nationkey, _row_id, _row_id, o_orderkey, s_suppkey, ps_suppkey, ps_partkey] } + StreamProject { exprs: [n_name, Extract('YEAR':Varchar, o_orderdate) as $expr1, ((l_extendedprice * (1:Decimal - l_discount)) - (ps_supplycost * l_quantity)) as $expr2, _row_id, _row_id, p_partkey, _row_id, _row_id, n_nationkey, _row_id, 
_row_id, o_orderkey, ps_suppkey, ps_partkey] } └── StreamHashJoin [append_only] { type: Inner, predicate: p_partkey = l_partkey AND ps_suppkey = l_suppkey AND ps_partkey = l_partkey AND ps_suppkey = s_suppkey, output: [l_quantity, l_extendedprice, l_discount, ps_supplycost, o_orderdate, n_name, _row_id, _row_id, p_partkey, ps_suppkey, ps_partkey, _row_id, _row_id, n_nationkey, _row_id, _row_id, o_orderkey, s_suppkey] } ├── left table: 1 ├── right table: 3 @@ -1797,110 +1801,114 @@ └─BatchSource { source: lineitem, columns: [l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment, _row_id], filter: (None, None) } stream_plan: |- StreamMaterialize { columns: [s_name, s_address, _row_id(hidden), _row_id#1(hidden), s_nationkey(hidden), s_suppkey(hidden)], stream_key: [_row_id, _row_id#1, s_nationkey, s_suppkey], pk_columns: [s_name, _row_id, _row_id#1, s_nationkey, s_suppkey], pk_conflict: NoCheck } - └─StreamHashJoin { type: LeftSemi, predicate: s_suppkey = ps_suppkey, output: [s_name, s_address, _row_id, _row_id, s_nationkey, s_suppkey] } - ├─StreamExchange { dist: HashShard(s_suppkey) } - │ └─StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [s_suppkey, s_name, s_address, _row_id, s_nationkey, _row_id] } - │ ├─StreamExchange { dist: HashShard(s_nationkey) } - │ │ └─StreamRowIdGen { row_id_index: 7 } - │ │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(n_nationkey) } - │ └─StreamRowIdGen { row_id_index: 4 } - │ └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } - └─StreamExchange { dist: HashShard(ps_suppkey) } - └─StreamProject { exprs: [ps_suppkey, _row_id, ps_partkey, ps_partkey, ps_suppkey] } - └─StreamFilter { 
predicate: ($expr1 > $expr2) } - └─StreamHashJoin { type: Inner, predicate: ps_partkey IS NOT DISTINCT FROM ps_partkey AND ps_suppkey IS NOT DISTINCT FROM ps_suppkey, output: all } - ├─StreamExchange { dist: HashShard(ps_partkey, ps_suppkey) } - │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_availqty::Decimal as $expr1, _row_id] } - │ └─StreamShare { id: 13 } - │ └─StreamHashJoin { type: LeftSemi, predicate: ps_partkey = p_partkey, output: [ps_partkey, ps_suppkey, ps_availqty, _row_id] } - │ ├─StreamExchange { dist: HashShard(ps_partkey) } - │ │ └─StreamRowIdGen { row_id_index: 5 } - │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } - │ └─StreamExchange { dist: HashShard(p_partkey) } - │ └─StreamProject { exprs: [p_partkey, _row_id] } - │ └─StreamRowIdGen { row_id_index: 9 } - │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } - └─StreamProject { exprs: [ps_partkey, ps_suppkey, (0.5:Decimal * sum(l_quantity)) as $expr2] } - └─StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [sum(l_quantity), count] } - └─StreamHashJoin { type: LeftOuter, predicate: ps_partkey IS NOT DISTINCT FROM l_partkey AND ps_suppkey IS NOT DISTINCT FROM l_suppkey, output: [ps_partkey, ps_suppkey, l_quantity, _row_id] } - ├─StreamExchange { dist: HashShard(ps_partkey, ps_suppkey) } - │ └─StreamProject { exprs: [ps_partkey, ps_suppkey] } - │ └─StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [count] } - │ └─StreamShare { id: 13 } - │ └─StreamHashJoin { type: LeftSemi, predicate: ps_partkey = p_partkey, output: [ps_partkey, ps_suppkey, ps_availqty, _row_id] } - │ ├─StreamExchange { dist: HashShard(ps_partkey) } - │ │ └─StreamRowIdGen { row_id_index: 5 } - │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } - │ └─StreamExchange { 
dist: HashShard(p_partkey) } - │ └─StreamProject { exprs: [p_partkey, _row_id] } - │ └─StreamRowIdGen { row_id_index: 9 } - │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } - └─StreamExchange { dist: HashShard(l_partkey, l_suppkey) } - └─StreamProject { exprs: [l_partkey, l_suppkey, l_quantity, _row_id] } - └─StreamFilter { predicate: IsNotNull(l_partkey) AND IsNotNull(l_suppkey) } - └─StreamRowIdGen { row_id_index: 16 } - └─StreamSource { source: lineitem, columns: [l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment, _row_id] } + └─StreamExchange { dist: HashShard(_row_id, _row_id, s_nationkey, s_suppkey) } + └─StreamHashJoin { type: LeftSemi, predicate: s_suppkey = ps_suppkey, output: [s_name, s_address, _row_id, _row_id, s_nationkey, s_suppkey] } + ├─StreamExchange { dist: HashShard(s_suppkey) } + │ └─StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [s_suppkey, s_name, s_address, _row_id, s_nationkey, _row_id] } + │ ├─StreamExchange { dist: HashShard(s_nationkey) } + │ │ └─StreamRowIdGen { row_id_index: 7 } + │ │ └─StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(n_nationkey) } + │ └─StreamRowIdGen { row_id_index: 4 } + │ └─StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } + └─StreamExchange { dist: HashShard(ps_suppkey) } + └─StreamProject { exprs: [ps_suppkey, _row_id, ps_partkey, ps_partkey, ps_suppkey] } + └─StreamFilter { predicate: ($expr1 > $expr2) } + └─StreamHashJoin { type: Inner, predicate: ps_partkey IS NOT DISTINCT FROM ps_partkey AND ps_suppkey IS NOT DISTINCT FROM ps_suppkey, output: all } + 
├─StreamExchange { dist: HashShard(ps_partkey, ps_suppkey) } + │ └─StreamProject { exprs: [ps_partkey, ps_suppkey, ps_availqty::Decimal as $expr1, _row_id] } + │ └─StreamShare { id: 13 } + │ └─StreamHashJoin { type: LeftSemi, predicate: ps_partkey = p_partkey, output: [ps_partkey, ps_suppkey, ps_availqty, _row_id] } + │ ├─StreamExchange { dist: HashShard(ps_partkey) } + │ │ └─StreamRowIdGen { row_id_index: 5 } + │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(p_partkey) } + │ └─StreamProject { exprs: [p_partkey, _row_id] } + │ └─StreamRowIdGen { row_id_index: 9 } + │ └─StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } + └─StreamProject { exprs: [ps_partkey, ps_suppkey, (0.5:Decimal * sum(l_quantity)) as $expr2] } + └─StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [sum(l_quantity), count] } + └─StreamHashJoin { type: LeftOuter, predicate: ps_partkey IS NOT DISTINCT FROM l_partkey AND ps_suppkey IS NOT DISTINCT FROM l_suppkey, output: [ps_partkey, ps_suppkey, l_quantity, _row_id] } + ├─StreamExchange { dist: HashShard(ps_partkey, ps_suppkey) } + │ └─StreamProject { exprs: [ps_partkey, ps_suppkey] } + │ └─StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [count] } + │ └─StreamShare { id: 13 } + │ └─StreamHashJoin { type: LeftSemi, predicate: ps_partkey = p_partkey, output: [ps_partkey, ps_suppkey, ps_availqty, _row_id] } + │ ├─StreamExchange { dist: HashShard(ps_partkey) } + │ │ └─StreamRowIdGen { row_id_index: 5 } + │ │ └─StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } + │ └─StreamExchange { dist: HashShard(p_partkey) } + │ └─StreamProject { exprs: [p_partkey, _row_id] } + │ └─StreamRowIdGen { row_id_index: 9 } + │ └─StreamSource { source: part, columns: [p_partkey, p_name, 
p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } + └─StreamExchange { dist: HashShard(l_partkey, l_suppkey) } + └─StreamProject { exprs: [l_partkey, l_suppkey, l_quantity, _row_id] } + └─StreamFilter { predicate: IsNotNull(l_partkey) AND IsNotNull(l_suppkey) } + └─StreamRowIdGen { row_id_index: 16 } + └─StreamSource { source: lineitem, columns: [l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment, _row_id] } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_name, s_address, _row_id(hidden), _row_id#1(hidden), s_nationkey(hidden), s_suppkey(hidden)], stream_key: [_row_id, _row_id#1, s_nationkey, s_suppkey], pk_columns: [s_name, _row_id, _row_id#1, s_nationkey, s_suppkey], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamHashJoin { type: LeftSemi, predicate: s_suppkey = ps_suppkey, output: [s_name, s_address, _row_id, _row_id, s_nationkey, s_suppkey] } { left table: 0, right table: 2, left degree table: 1, right degree table: 3 } - ├── StreamExchange Hash([0]) from 1 - └── StreamExchange Hash([0]) from 4 + └── StreamExchange Hash([2, 3, 4, 5]) from 1 Fragment 1 - StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, output: [s_suppkey, s_name, s_address, _row_id, s_nationkey, _row_id] } { left table: 4, right table: 6, left degree table: 5, right degree table: 7 } - ├── StreamExchange Hash([3]) from 2 - └── StreamExchange Hash([0]) from 3 + StreamHashJoin { type: LeftSemi, predicate: s_suppkey = ps_suppkey, output: [s_name, s_address, _row_id, _row_id, s_nationkey, s_suppkey] } { left table: 0, right table: 2, left degree table: 1, right degree table: 3 } + ├── StreamExchange Hash([0]) from 2 + └── StreamExchange Hash([0]) from 5 Fragment 2 + StreamHashJoin [append_only] { type: Inner, predicate: s_nationkey = n_nationkey, 
output: [s_suppkey, s_name, s_address, _row_id, s_nationkey, _row_id] } { left table: 4, right table: 6, left degree table: 5, right degree table: 7 } + ├── StreamExchange Hash([3]) from 3 + └── StreamExchange Hash([0]) from 4 + + Fragment 3 StreamRowIdGen { row_id_index: 7 } └── StreamSource { source: supplier, columns: [s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id] } { source state table: 8 } - Fragment 3 + Fragment 4 StreamRowIdGen { row_id_index: 4 } └── StreamSource { source: nation, columns: [n_nationkey, n_name, n_regionkey, n_comment, _row_id] } { source state table: 9 } - Fragment 4 + Fragment 5 StreamProject { exprs: [ps_suppkey, _row_id, ps_partkey, ps_partkey, ps_suppkey] } └── StreamFilter { predicate: ($expr1 > $expr2) } └── StreamHashJoin { type: Inner, predicate: ps_partkey IS NOT DISTINCT FROM ps_partkey AND ps_suppkey IS NOT DISTINCT FROM ps_suppkey, output: all } { left table: 10, right table: 12, left degree table: 11, right degree table: 13 } - ├── StreamExchange Hash([0, 1]) from 5 + ├── StreamExchange Hash([0, 1]) from 6 └── StreamProject { exprs: [ps_partkey, ps_suppkey, (0.5:Decimal * sum(l_quantity)) as $expr2] } - └── StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [sum(l_quantity), count] } { result table: 20, state tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [sum(l_quantity), count] } { intermediate state table: 20, state tables: [], distinct tables: [] } └── StreamHashJoin { type: LeftOuter, predicate: ps_partkey IS NOT DISTINCT FROM l_partkey AND ps_suppkey IS NOT DISTINCT FROM l_suppkey, output: [ps_partkey, ps_suppkey, l_quantity, _row_id] } ├── left table: 21 ├── right table: 23 ├── left degree table: 22 ├── right degree table: 24 - ├── StreamExchange Hash([0, 1]) from 9 - └── StreamExchange Hash([0, 1]) from 10 + ├── StreamExchange Hash([0, 1]) from 10 + └── StreamExchange Hash([0, 1]) from 11 - Fragment 5 + Fragment 6 
StreamProject { exprs: [ps_partkey, ps_suppkey, ps_availqty::Decimal as $expr1, _row_id] } - └── StreamExchange NoShuffle from 6 + └── StreamExchange NoShuffle from 7 - Fragment 6 + Fragment 7 StreamHashJoin { type: LeftSemi, predicate: ps_partkey = p_partkey, output: [ps_partkey, ps_suppkey, ps_availqty, _row_id] } { left table: 14, right table: 16, left degree table: 15, right degree table: 17 } - ├── StreamExchange Hash([0]) from 7 - └── StreamExchange Hash([0]) from 8 + ├── StreamExchange Hash([0]) from 8 + └── StreamExchange Hash([0]) from 9 - Fragment 7 + Fragment 8 StreamRowIdGen { row_id_index: 5 } └── StreamSource { source: partsupp, columns: [ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, ps_comment, _row_id] } { source state table: 18 } - Fragment 8 + Fragment 9 StreamProject { exprs: [p_partkey, _row_id] } └── StreamRowIdGen { row_id_index: 9 } └── StreamSource { source: part, columns: [p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment, _row_id] } { source state table: 19 } - Fragment 9 + Fragment 10 StreamProject { exprs: [ps_partkey, ps_suppkey] } - └── StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [count] } { result table: 25, state tables: [], distinct tables: [] } - └── StreamExchange NoShuffle from 6 + └── StreamHashAgg { group_key: [ps_partkey, ps_suppkey], aggs: [count] } { intermediate state table: 25, state tables: [], distinct tables: [] } + └── StreamExchange NoShuffle from 7 - Fragment 10 + Fragment 11 StreamProject { exprs: [l_partkey, l_suppkey, l_quantity, _row_id] } └── StreamFilter { predicate: IsNotNull(l_partkey) AND IsNotNull(l_suppkey) } └── StreamRowIdGen { row_id_index: 16 } @@ -1911,9 +1919,9 @@ Table 1 { columns: [ s_suppkey, _row_id, _row_id_0, s_nationkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 2 { columns: [ ps_suppkey, _row_id, ps_partkey, ps_partkey_0, 
ps_suppkey_0 ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 2 { columns: [ ps_suppkey, _row_id, ps_partkey, ps_partkey_0, ps_suppkey_0 ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 3 { columns: [ ps_suppkey, _row_id, ps_partkey, ps_partkey_0, ps_suppkey_0, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 5 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 3 { columns: [ ps_suppkey, _row_id, ps_partkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 4 { columns: [ s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, _row_id ], primary key: [ $3 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 3 ], read pk prefix len hint: 1 } @@ -1961,7 +1969,7 @@ Table 26 { columns: [ partition_id, offset_info ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - Table 4294967294 { columns: [ s_name, s_address, _row_id, _row_id#1, s_nationkey, s_suppkey ], primary key: [ $0 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 5 ], read pk prefix len hint: 4 } + Table 4294967294 { columns: [ s_name, s_address, _row_id, _row_id#1, s_nationkey, s_suppkey ], primary key: [ $0 ASC, $2 ASC, $3 ASC, $4 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 2, 3, 4, 5 ], read pk prefix len hint: 5 } - id: tpch_q21 before: @@ -2113,7 +2121,10 @@ Fragment 0 StreamMaterialize { columns: [s_name, numwait], stream_key: [s_name], pk_columns: [numwait, s_name], pk_conflict: NoCheck } ├── materialized table: 4294967294 - └── StreamHashAgg { group_key: [s_name], aggs: [count] } { result table: 0, state 
tables: [], distinct tables: [] } + └── StreamHashAgg { group_key: [s_name], aggs: [count] } + ├── intermediate state table: 0 + ├── state tables: [] + ├── distinct tables: [] └── StreamExchange Hash([0]) from 1 Fragment 1 @@ -2264,5 +2275,5 @@ Table 24 { columns: [ partition_id, offset_info ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 1 } - Table 4294967294 { columns: [ s_name, numwait ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } + Table 4294967294 { columns: [ s_name, numwait ], primary key: [ $1 DESC, $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 2 } diff --git a/src/frontend/planner_test/tests/testdata/output/union.yaml b/src/frontend/planner_test/tests/testdata/output/union.yaml index 6c86e00aa2f54..14e7b7e65cb70 100644 --- a/src/frontend/planner_test/tests/testdata/output/union.yaml +++ b/src/frontend/planner_test/tests/testdata/output/union.yaml @@ -10,39 +10,56 @@ └─BatchExchange { order: [], dist: Single } └─BatchScan { table: t2, columns: [t2.a, t2.b, t2.c], distribution: SomeShard } stream_plan: |- - StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), null:Serial(hidden), 0:Int32(hidden)], stream_key: [t1._row_id, null:Serial, 0:Int32], pk_columns: [t1._row_id, null:Serial, 0:Int32], pk_conflict: NoCheck } + StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } └─StreamUnion { all: true } - ├─StreamExchange { dist: HashShard(t1._row_id, null:Serial, 0:Int32) } - │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, null:Serial, 0:Int32] } + ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } + │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: 
UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(null:Serial, t2._row_id, 1:Int32) } - └─StreamProject { exprs: [t2.a, t2.b, t2.c, null:Serial, t2._row_id, 1:Int32] } + └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } + └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), null:Serial(hidden), 0:Int32(hidden)], stream_key: [t1._row_id, null:Serial, 0:Int32], pk_columns: [t1._row_id, null:Serial, 0:Int32], pk_conflict: NoCheck } + StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } ├── materialized table: 4294967294 └── StreamUnion { all: true } - ├── StreamExchange Hash([3, 4, 5]) from 1 - └── StreamExchange Hash([3, 4, 5]) from 2 + ├── StreamExchange Hash([3, 4]) from 1 + └── StreamExchange Hash([3, 4]) from 2 Fragment 1 - StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, null:Serial, 0:Int32] } + StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } └── Chain { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } { state table: 0 } ├── Upstream └── BatchPlanNode Fragment 2 - StreamProject { exprs: [t2.a, t2.b, t2.c, null:Serial, t2._row_id, 1:Int32] } + StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } └── Chain { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } { state table: 1 } ├── Upstream └── BatchPlanNode - Table 0 { columns: [ vnode, _row_id, t1_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 0 + ├── columns: [ vnode, _row_id, 
t1_backfill_finished, t1_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 - Table 1 { columns: [ vnode, _row_id, t2_backfill_finished ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + Table 1 + ├── columns: [ vnode, _row_id, t2_backfill_finished, t2_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 - Table 4294967294 { columns: [ a, b, c, t1._row_id, null:Serial, 0:Int32 ], primary key: [ $3 ASC, $4 ASC, $5 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 3, 4, 5 ], read pk prefix len hint: 3 } + Table 4294967294 + ├── columns: [ a, b, c, t1._row_id, $src ] + ├── primary key: [ $3 ASC, $4 ASC ] + ├── value indices: [ 0, 1, 2, 3, 4 ] + ├── distribution key: [ 3, 4 ] + └── read pk prefix len hint: 2 - sql: | create table t1 (a int, b numeric, c bigint); @@ -68,11 +85,11 @@ └─StreamHashAgg { group_key: [t1.a, t1.b, t1.c], aggs: [count] } └─StreamExchange { dist: HashShard(t1.a, t1.b, t1.c) } └─StreamUnion { all: true } - ├─StreamExchange { dist: HashShard(t1._row_id, null:Serial, 0:Int32) } - │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, null:Serial, 0:Int32] } + ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } + │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(null:Serial, t2._row_id, 1:Int32) } - └─StreamProject { exprs: [t2.a, t2.b, t2.c, null:Serial, t2._row_id, 1:Int32] } + └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } + └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } └─StreamTableScan { table: t2, columns: 
[t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 @@ -80,25 +97,25 @@ ├── materialized table: 4294967294 └── StreamProject { exprs: [t1.a, t1.b, t1.c] } └── StreamHashAgg { group_key: [t1.a, t1.b, t1.c], aggs: [count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0, 1, 2]) from 1 Fragment 1 StreamUnion { all: true } - ├── StreamExchange Hash([3, 4, 5]) from 2 - └── StreamExchange Hash([3, 4, 5]) from 3 + ├── StreamExchange Hash([3, 4]) from 2 + └── StreamExchange Hash([3, 4]) from 3 Fragment 2 - StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, null:Serial, 0:Int32] } + StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } └── Chain { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── state table: 1 ├── Upstream └── BatchPlanNode Fragment 3 - StreamProject { exprs: [t2.a, t2.b, t2.c, null:Serial, t2._row_id, 1:Int32] } + StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } └── Chain { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── state table: 2 ├── Upstream @@ -112,17 +129,17 @@ └── read pk prefix len hint: 3 Table 1 - ├── columns: [ vnode, _row_id, t1_backfill_finished ] + ├── columns: [ vnode, _row_id, t1_backfill_finished, t1_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 Table 2 - ├── columns: [ vnode, _row_id, t2_backfill_finished ] + ├── columns: [ vnode, _row_id, t2_backfill_finished, t2_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 @@ -170,7 +187,7 @@ ├── materialized table: 
4294967294 └── StreamProject { exprs: [t1.a, t1.b, t1.c] } └── StreamHashAgg { group_key: [t1.a, t1.b, t1.c], aggs: [count] } - ├── result table: 0 + ├── intermediate state table: 0 ├── state tables: [] ├── distinct tables: [] └── StreamExchange Hash([0, 1, 2]) from 1 @@ -202,17 +219,17 @@ └── read pk prefix len hint: 3 Table 1 - ├── columns: [ vnode, a, t1_backfill_finished ] + ├── columns: [ vnode, a, t1_backfill_finished, t1_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 Table 2 - ├── columns: [ vnode, a, t2_backfill_finished ] + ├── columns: [ vnode, a, t2_backfill_finished, t2_row_count ] ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] + ├── value indices: [ 1, 2, 3 ] ├── distribution key: [ 0 ] ├── read pk prefix len hint: 1 └── vnode column idx: 0 @@ -292,3 +309,347 @@ └─BatchHashAgg { group_key: [1:Int32], aggs: [] } └─BatchExchange { order: [], dist: HashShard(1:Int32) } └─BatchValues { rows: [[1:Int32], [2:Int32], [3:Int32], [4:Int32], [5:Int32], [5:Int32]] } +- name: test merged union stream key (2 columns, row_id + src_col) + sql: | + create table t1 (a int, b numeric, c bigint); + create table t2 (a int, b numeric, c bigint); + create table t3 (a int, b numeric, c bigint); + create table t4 (a int, b numeric, c bigint); + create table t5 (a int, b numeric, c bigint); + select * from t1 union all select * from t2 union all select * from t3 union all select * from t4 union all select * from t5; + batch_plan: |- + BatchUnion { all: true } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchScan { table: t1, columns: [t1.a, t1.b, t1.c], distribution: SomeShard } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchScan { table: t2, columns: [t2.a, t2.b, t2.c], distribution: SomeShard } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchScan { table: t3, columns: [t3.a, t3.b, t3.c], distribution: 
SomeShard } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchScan { table: t4, columns: [t4.a, t4.b, t4.c], distribution: SomeShard } + └─BatchExchange { order: [], dist: Single } + └─BatchScan { table: t5, columns: [t5.a, t5.b, t5.c], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } + └─StreamUnion { all: true } + ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } + │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } + │ └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } + │ └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + ├─StreamExchange { dist: HashShard(t3._row_id, 2:Int32) } + │ └─StreamProject { exprs: [t3.a, t3.b, t3.c, t3._row_id, 2:Int32] } + │ └─StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + ├─StreamExchange { dist: HashShard(t4._row_id, 3:Int32) } + │ └─StreamProject { exprs: [t4.a, t4.b, t4.c, t4._row_id, 3:Int32] } + │ └─StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + └─StreamExchange { dist: HashShard(t5._row_id, 4:Int32) } + └─StreamProject { exprs: [t5.a, t5.b, t5.c, t5._row_id, 4:Int32] } + └─StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } + stream_dist_plan: |+ + Fragment 0 + StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } + ├── 
materialized table: 4294967294 + └── StreamUnion { all: true } + ├── StreamExchange Hash([3, 4]) from 1 + ├── StreamExchange Hash([3, 4]) from 2 + ├── StreamExchange Hash([3, 4]) from 3 + ├── StreamExchange Hash([3, 4]) from 4 + └── StreamExchange Hash([3, 4]) from 5 + + Fragment 1 + StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } + └── Chain { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } { state table: 0 } + ├── Upstream + └── BatchPlanNode + + Fragment 2 + StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } + └── Chain { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } { state table: 1 } + ├── Upstream + └── BatchPlanNode + + Fragment 3 + StreamProject { exprs: [t3.a, t3.b, t3.c, t3._row_id, 2:Int32] } + └── Chain { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } { state table: 2 } + ├── Upstream + └── BatchPlanNode + + Fragment 4 + StreamProject { exprs: [t4.a, t4.b, t4.c, t4._row_id, 3:Int32] } + └── Chain { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { state table: 3 } + ├── Upstream + └── BatchPlanNode + + Fragment 5 + StreamProject { exprs: [t5.a, t5.b, t5.c, t5._row_id, 4:Int32] } + └── Chain { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } { state table: 4 } + ├── Upstream + └── BatchPlanNode + + Table 0 + ├── columns: [ vnode, _row_id, t1_backfill_finished, t1_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 1 + ├── columns: [ vnode, _row_id, t2_backfill_finished, t2_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 
+ └── vnode column idx: 0 + + Table 2 + ├── columns: [ vnode, _row_id, t3_backfill_finished, t3_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 3 + ├── columns: [ vnode, _row_id, t4_backfill_finished, t4_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 4 + ├── columns: [ vnode, _row_id, t5_backfill_finished, t5_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 4294967294 + ├── columns: [ a, b, c, t1._row_id, $src ] + ├── primary key: [ $3 ASC, $4 ASC ] + ├── value indices: [ 0, 1, 2, 3, 4 ] + ├── distribution key: [ 3, 4 ] + └── read pk prefix len hint: 2 + +- name: test merged union stream key (5 columns, row_id + src_col + a + b + c) + sql: | + create table t1 (a int, b numeric, c bigint, primary key (a)); + create table t2 (a int, b numeric, c bigint, primary key (b)); + create table t3 (a int, b numeric, c bigint, primary key (c)); + create table t4 (a int, b numeric, c bigint); + create table t5 (a int, b numeric, c bigint, primary key (a, b)); + select * from t1 union all select * from t2 union all select * from t3 union all select * from t4 union all select * from t5; + stream_dist_plan: |+ + Fragment 0 + StreamMaterialize { columns: [a, b, c, t1.a(hidden), null:Int64(hidden), null:Decimal(hidden), null:Serial(hidden), $src(hidden)], stream_key: [t1.a, null:Decimal, null:Int64, null:Serial, $src], pk_columns: [t1.a, null:Decimal, null:Int64, null:Serial, $src], pk_conflict: NoCheck } + ├── materialized table: 4294967294 + └── StreamUnion { all: true } + ├── StreamExchange Hash([3, 5, 4, 6, 7]) from 1 + ├── StreamExchange Hash([3, 5, 4, 6, 7]) from 2 + ├── StreamExchange Hash([3, 5, 4, 6, 
7]) from 3 + ├── StreamExchange Hash([3, 5, 4, 6, 7]) from 4 + └── StreamExchange Hash([3, 5, 4, 6, 7]) from 5 + + Fragment 1 + StreamProject { exprs: [t1.a, t1.b, t1.c, t1.a, null:Int64, null:Decimal, null:Serial, 0:Int32] } + └── Chain { table: t1, columns: [t1.a, t1.b, t1.c], pk: [t1.a], dist: UpstreamHashShard(t1.a) } { state table: 0 } + ├── Upstream + └── BatchPlanNode + + Fragment 2 + StreamProject { exprs: [t2.a, t2.b, t2.c, null:Int32, null:Int64, t2.b, null:Serial, 1:Int32] } + └── Chain { table: t2, columns: [t2.a, t2.b, t2.c], pk: [t2.b], dist: UpstreamHashShard(t2.b) } { state table: 1 } + ├── Upstream + └── BatchPlanNode + + Fragment 3 + StreamProject { exprs: [t3.a, t3.b, t3.c, null:Int32, t3.c, null:Decimal, null:Serial, 2:Int32] } + └── Chain { table: t3, columns: [t3.a, t3.b, t3.c], pk: [t3.c], dist: UpstreamHashShard(t3.c) } { state table: 2 } + ├── Upstream + └── BatchPlanNode + + Fragment 4 + StreamProject { exprs: [t4.a, t4.b, t4.c, null:Int32, null:Int64, null:Decimal, t4._row_id, 3:Int32] } + └── Chain { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { state table: 3 } + ├── Upstream + └── BatchPlanNode + + Fragment 5 + StreamProject { exprs: [t5.a, t5.b, t5.c, t5.a, null:Int64, t5.b, null:Serial, 4:Int32] } + └── Chain { table: t5, columns: [t5.a, t5.b, t5.c], pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { state table: 4 } + ├── Upstream + └── BatchPlanNode + + Table 0 { columns: [ vnode, a, t1_backfill_finished, t1_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 1 { columns: [ vnode, b, t2_backfill_finished, t2_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 2 { columns: [ vnode, c, t3_backfill_finished, t3_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 
3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 3 { columns: [ vnode, _row_id, t4_backfill_finished, t4_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 4 { columns: [ vnode, a, b, t5_backfill_finished, t5_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 4294967294 { columns: [ a, b, c, t1.a, null:Int64, null:Decimal, null:Serial, $src ], primary key: [ $3 ASC, $5 ASC, $4 ASC, $6 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 3, 5, 4, 6, 7 ], read pk prefix len hint: 5 } + +- name: test merged union stream key (4 columns, row_id + src_col + a + b) + sql: | + create table t1 (a int, b numeric, c bigint, primary key (a)); + create table t2 (a int, b numeric, c bigint, primary key (b)); + create table t3 (a int, b numeric, c bigint); + create table t4 (a int, b numeric, c bigint); + create table t5 (a int, b numeric, c bigint, primary key (a, b)); + select * from t1 union all select * from t2 union all select * from t3 union all select * from t4 union all select * from t5; + stream_dist_plan: |+ + Fragment 0 + StreamMaterialize { columns: [a, b, c, t1.a(hidden), null:Decimal(hidden), null:Serial(hidden), $src(hidden)], stream_key: [t1.a, null:Decimal, null:Serial, $src], pk_columns: [t1.a, null:Decimal, null:Serial, $src], pk_conflict: NoCheck } + ├── materialized table: 4294967294 + └── StreamUnion { all: true } + ├── StreamExchange Hash([3, 4, 5, 6]) from 1 + ├── StreamExchange Hash([3, 4, 5, 6]) from 2 + ├── StreamExchange Hash([3, 4, 5, 6]) from 3 + ├── StreamExchange Hash([3, 4, 5, 6]) from 4 + └── StreamExchange Hash([3, 4, 5, 6]) from 5 + + Fragment 1 + StreamProject { exprs: [t1.a, t1.b, t1.c, t1.a, null:Decimal, null:Serial, 0:Int32] } + └── Chain { table: t1, columns: [t1.a, t1.b, 
t1.c], pk: [t1.a], dist: UpstreamHashShard(t1.a) } { state table: 0 } + ├── Upstream + └── BatchPlanNode + + Fragment 2 + StreamProject { exprs: [t2.a, t2.b, t2.c, null:Int32, t2.b, null:Serial, 1:Int32] } + └── Chain { table: t2, columns: [t2.a, t2.b, t2.c], pk: [t2.b], dist: UpstreamHashShard(t2.b) } { state table: 1 } + ├── Upstream + └── BatchPlanNode + + Fragment 3 + StreamProject { exprs: [t3.a, t3.b, t3.c, null:Int32, null:Decimal, t3._row_id, 2:Int32] } + └── Chain { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } { state table: 2 } + ├── Upstream + └── BatchPlanNode + + Fragment 4 + StreamProject { exprs: [t4.a, t4.b, t4.c, null:Int32, null:Decimal, t4._row_id, 3:Int32] } + └── Chain { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { state table: 3 } + ├── Upstream + └── BatchPlanNode + + Fragment 5 + StreamProject { exprs: [t5.a, t5.b, t5.c, t5.a, t5.b, null:Serial, 4:Int32] } + └── Chain { table: t5, columns: [t5.a, t5.b, t5.c], pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { state table: 4 } + ├── Upstream + └── BatchPlanNode + + Table 0 { columns: [ vnode, a, t1_backfill_finished, t1_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 1 { columns: [ vnode, b, t2_backfill_finished, t2_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 2 { columns: [ vnode, _row_id, t3_backfill_finished, t3_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 3 { columns: [ vnode, _row_id, t4_backfill_finished, t4_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 
} + + Table 4 { columns: [ vnode, a, b, t5_backfill_finished, t5_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } + + Table 4294967294 { columns: [ a, b, c, t1.a, null:Decimal, null:Serial, $src ], primary key: [ $3 ASC, $4 ASC, $5 ASC, $6 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 3, 4, 5, 6 ], read pk prefix len hint: 4 } + +- name: test merged union stream key (3 columns, src_col + a + b) + sql: | + create table t1 (a int, b numeric, c bigint, primary key (a)); + create table t2 (a int, b numeric, c bigint, primary key (b)); + create table t3 (a int, b numeric, c bigint, primary key (b)); + create table t4 (a int, b numeric, c bigint, primary key (b, a)); + create table t5 (a int, b numeric, c bigint, primary key (a, b)); + select * from t1 union all select * from t2 union all select * from t3 union all select * from t4 union all select * from t5; + stream_dist_plan: |+ + Fragment 0 + StreamMaterialize { columns: [a, b, c, $src(hidden)], stream_key: [a, b, $src], pk_columns: [a, b, $src], pk_conflict: NoCheck } + ├── materialized table: 4294967294 + └── StreamUnion { all: true } + ├── StreamExchange Hash([0, 1, 3]) from 1 + ├── StreamExchange Hash([0, 1, 3]) from 2 + ├── StreamExchange Hash([0, 1, 3]) from 3 + ├── StreamExchange Hash([0, 1, 3]) from 4 + └── StreamExchange Hash([0, 1, 3]) from 5 + + Fragment 1 + StreamProject { exprs: [t1.a, t1.b, t1.c, 0:Int32] } + └── Chain { table: t1, columns: [t1.a, t1.b, t1.c], pk: [t1.a], dist: UpstreamHashShard(t1.a) } { state table: 0 } + ├── Upstream + └── BatchPlanNode + + Fragment 2 + StreamProject { exprs: [t2.a, t2.b, t2.c, 1:Int32] } + └── Chain { table: t2, columns: [t2.a, t2.b, t2.c], pk: [t2.b], dist: UpstreamHashShard(t2.b) } { state table: 1 } + ├── Upstream + └── BatchPlanNode + + Fragment 3 + StreamProject { exprs: [t3.a, t3.b, t3.c, 2:Int32] } + └── Chain { table: t3, columns: [t3.a, t3.b, 
t3.c], pk: [t3.b], dist: UpstreamHashShard(t3.b) } { state table: 2 } + ├── Upstream + └── BatchPlanNode + + Fragment 4 + StreamProject { exprs: [t4.a, t4.b, t4.c, 3:Int32] } + └── Chain { table: t4, columns: [t4.a, t4.b, t4.c], pk: [t4.b, t4.a], dist: UpstreamHashShard(t4.a, t4.b) } { state table: 3 } + ├── Upstream + └── BatchPlanNode + + Fragment 5 + StreamProject { exprs: [t5.a, t5.b, t5.c, 4:Int32] } + └── Chain { table: t5, columns: [t5.a, t5.b, t5.c], pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { state table: 4 } + ├── Upstream + └── BatchPlanNode + + Table 0 + ├── columns: [ vnode, a, t1_backfill_finished, t1_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 1 + ├── columns: [ vnode, b, t2_backfill_finished, t2_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 2 + ├── columns: [ vnode, b, t3_backfill_finished, t3_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 3 + ├── columns: [ vnode, b, a, t4_backfill_finished, t4_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3, 4 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 4 + ├── columns: [ vnode, a, b, t5_backfill_finished, t5_row_count ] + ├── primary key: [ $0 ASC ] + ├── value indices: [ 1, 2, 3, 4 ] + ├── distribution key: [ 0 ] + ├── read pk prefix len hint: 1 + └── vnode column idx: 0 + + Table 4294967294 + ├── columns: [ a, b, c, $src ] + ├── primary key: [ $0 ASC, $1 ASC, $3 ASC ] + ├── value indices: [ 0, 1, 2, 3 ] + ├── distribution key: [ 0, 1, 3 ] + └── read pk prefix len hint: 3 + diff --git 
a/src/frontend/planner_test/tests/testdata/output/update.yaml b/src/frontend/planner_test/tests/testdata/output/update.yaml index f3a27a3d2e514..3db7ac3501018 100644 --- a/src/frontend/planner_test/tests/testdata/output/update.yaml +++ b/src/frontend/planner_test/tests/testdata/output/update.yaml @@ -116,3 +116,41 @@ create table t(v1 int as v2-1, v2 int, v3 int as v2+1, primary key (v3)); update t set v2 = 3; binder_error: 'Bind error: update modifying the column referenced by generated columns that are part of the primary key is not allowed' +- name: update subquery + sql: | + create table t (a int, b int); + update t set a = 777 where b not in (select a from t); + logical_plan: |- + LogicalUpdate { table: t, exprs: [777:Int32, $1, $2] } + └─LogicalApply { type: LeftAnti, on: (t.b = t.a), correlated_id: 1 } + ├─LogicalScan { table: t, columns: [t.a, t.b, t._row_id] } + └─LogicalProject { exprs: [t.a] } + └─LogicalScan { table: t, columns: [t.a, t.b, t._row_id] } + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchUpdate { table: t, exprs: [777:Int32, $1, $2] } + └─BatchExchange { order: [], dist: Single } + └─BatchHashJoin { type: LeftAnti, predicate: t.b = t.a, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.b) } + │ └─BatchScan { table: t, columns: [t.a, t.b, t._row_id], distribution: UpstreamHashShard(t._row_id) } + └─BatchExchange { order: [], dist: HashShard(t.a) } + └─BatchScan { table: t, columns: [t.a], distribution: SomeShard } +- name: delete subquery + sql: | + create table t (a int, b int); + delete from t where a not in (select b from t); + logical_plan: |- + LogicalDelete { table: t } + └─LogicalApply { type: LeftAnti, on: (t.a = t.b), correlated_id: 1 } + ├─LogicalScan { table: t, columns: [t.a, t.b, t._row_id] } + └─LogicalProject { exprs: [t.b] } + └─LogicalScan { table: t, columns: [t.a, t.b, t._row_id] } + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchDelete { table: t } + └─BatchExchange { 
order: [], dist: Single } + └─BatchHashJoin { type: LeftAnti, predicate: t.a = t.b, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.a) } + │ └─BatchScan { table: t, columns: [t.a, t.b, t._row_id], distribution: UpstreamHashShard(t._row_id) } + └─BatchExchange { order: [], dist: HashShard(t.b) } + └─BatchScan { table: t, columns: [t.b], distribution: SomeShard } diff --git a/src/frontend/planner_test/tests/testdata/output/watermark.yaml b/src/frontend/planner_test/tests/testdata/output/watermark.yaml index d1916a33192c6..d57d41fa76bc3 100644 --- a/src/frontend/planner_test/tests/testdata/output/watermark.yaml +++ b/src/frontend/planner_test/tests/testdata/output/watermark.yaml @@ -79,11 +79,12 @@ select t1.ts as t1_ts, t2.ts as ts2, t1.v1 as t1_v1, t1.v2 as t1_v2, t2.v1 as t2_v1, t2.v2 as t2_v2 from t1, t2 where t1.ts = t2.ts; stream_plan: |- StreamMaterialize { columns: [t1_ts, ts2, t1_v1, t1_v2, t2_v1, t2_v2, t1._row_id(hidden), t2._row_id(hidden)], stream_key: [t1._row_id, t2._row_id, t1_ts], pk_columns: [t1._row_id, t2._row_id, t1_ts], pk_conflict: NoCheck, watermark_columns: [t1_ts, ts2] } - └─StreamHashJoin [window, append_only] { type: Inner, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t2.ts, t1.v1, t1.v2, t2.v1, t2.v2, t1._row_id, t2._row_id] } - ├─StreamExchange { dist: HashShard(t1.ts) } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.ts) } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.ts, t1._row_id, t2._row_id) } + └─StreamHashJoin [window, append_only] { type: Inner, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t2.ts, t1.v1, t1.v2, t2.v1, t2.v2, t1._row_id, t2._row_id] } + ├─StreamExchange { dist: HashShard(t1.ts) } + │ 
└─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.ts) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: left semi window join sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -91,11 +92,12 @@ select t1.ts as t1_ts, t1.v1 as t1_v1, t1.v2 as t1_v2 from t1 where exists (select * from t2 where t1.ts = t2.ts); stream_plan: |- StreamMaterialize { columns: [t1_ts, t1_v1, t1_v2, t1._row_id(hidden)], stream_key: [t1._row_id, t1_ts], pk_columns: [t1._row_id, t1_ts], pk_conflict: NoCheck, watermark_columns: [t1_ts] } - └─StreamHashJoin [window] { type: LeftSemi, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts], output: all } - ├─StreamExchange { dist: HashShard(t1.ts) } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.ts) } - └─StreamTableScan { table: t2, columns: [t2.ts, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.ts, t1._row_id) } + └─StreamHashJoin [window] { type: LeftSemi, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts], output: all } + ├─StreamExchange { dist: HashShard(t1.ts) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.ts) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: interval join(left outer join) sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -108,13 +110,14 @@ └─LogicalScan { table: 
t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id] } stream_plan: |- StreamMaterialize { columns: [t1_ts, t1_v1, t1_v2, t2_ts, t2_v1, t2_v2, t1._row_id(hidden), t2._row_id(hidden)], stream_key: [t1._row_id, t2._row_id, t1_v1], pk_columns: [t1._row_id, t2._row_id, t1_v1], pk_conflict: NoCheck, watermark_columns: [t1_ts, t2_ts] } - └─StreamHashJoin [interval] { type: LeftOuter, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } - ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, $expr1] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.v1) } - └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.v1, t1._row_id, t2._row_id) } + └─StreamHashJoin [interval] { type: LeftOuter, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } + ├─StreamExchange { dist: HashShard(t1.v1) } + │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, 
$expr1] } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.v1) } + └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: interval join (inner join) sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -127,26 +130,27 @@ └─LogicalScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id] } stream_plan: |- StreamMaterialize { columns: [t1_ts, t1_v1, t1_v2, t2_ts, t2_v1, t2_v2, t1._row_id(hidden), t2._row_id(hidden)], stream_key: [t1._row_id, t2._row_id, t1_v1], pk_columns: [t1._row_id, t2._row_id, t1_v1], pk_conflict: NoCheck, watermark_columns: [t1_ts, t2_ts] } - └─StreamHashJoin [interval, append_only] { type: Inner, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } - ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, $expr1] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(t2.v1) } - └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } - └─StreamTableScan { table: t2, columns: [t2.ts, 
t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamExchange { dist: HashShard(t1.v1, t1._row_id, t2._row_id) } + └─StreamHashJoin [interval, append_only] { type: Inner, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } + ├─StreamExchange { dist: HashShard(t1.v1) } + │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, $expr1] } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamExchange { dist: HashShard(t2.v1) } + └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: union all sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; create table t2 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; select * from t1 Union all select * from t2; stream_plan: |- - StreamMaterialize { columns: [ts, v1, v2, t1._row_id(hidden), null:Serial(hidden), 0:Int32(hidden)], stream_key: [t1._row_id, null:Serial, 0:Int32], pk_columns: [t1._row_id, null:Serial, 0:Int32], pk_conflict: NoCheck, watermark_columns: [ts] } + StreamMaterialize { columns: [ts, v1, v2, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck, watermark_columns: [ts] } └─StreamUnion { all: 
true, output_watermarks: [t1.ts] } - ├─StreamExchange { dist: HashShard(t1._row_id, null:Serial, 0:Int32) } - │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, null:Serial, 0:Int32], output_watermarks: [t1.ts] } + ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } + │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, 0:Int32], output_watermarks: [t1.ts] } │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(null:Serial, t2._row_id, 1:Int32) } - └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, null:Serial, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } + └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } + └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: union sql: | @@ -159,11 +163,11 @@ └─StreamExchange { dist: HashShard(t1.ts, t1.v1, t1.v2) } └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2], output_watermarks: [t1.ts] } └─StreamUnion { all: true, output_watermarks: [t1.ts] } - ├─StreamExchange { dist: HashShard(t1._row_id, null:Serial, 0:Int32) } - │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, null:Serial, 0:Int32], output_watermarks: [t1.ts] } + ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } + │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, 0:Int32], output_watermarks: [t1.ts] } │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - └─StreamExchange { dist: HashShard(null:Serial, t2._row_id, 1:Int32) } - └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, null:Serial, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } + └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } + └─StreamProject { exprs: [t2.ts, t2.v1, 
t2.v2, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: tumble sql: | diff --git a/src/frontend/planner_test/tests/testdata/output/window_join.yaml b/src/frontend/planner_test/tests/testdata/output/window_join.yaml index 4113a6021e866..17c5e76f6e806 100644 --- a/src/frontend/planner_test/tests/testdata/output/window_join.yaml +++ b/src/frontend/planner_test/tests/testdata/output/window_join.yaml @@ -12,15 +12,16 @@ select * from t1, t2 where ts1 = ts2 and a1 = a2; stream_plan: |- StreamMaterialize { columns: [ts1, a1, b1, ts2, a2, b2, _row_id(hidden), _row_id#1(hidden)], stream_key: [_row_id, _row_id#1, ts1, a1], pk_columns: [_row_id, _row_id#1, ts1, a1], pk_conflict: NoCheck, watermark_columns: [ts1, ts2] } - └─StreamHashJoin [window, append_only] { type: Inner, predicate: ts1 = ts2 AND a1 = a2, output_watermarks: [ts1, ts2], output: [ts1, a1, b1, ts2, a2, b2, _row_id, _row_id] } - ├─StreamExchange { dist: HashShard(ts1, a1) } - │ └─StreamRowIdGen { row_id_index: 3 } - │ └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts1, expr: (ts1 - '00:00:01':Interval) }], output_watermarks: [ts1] } - │ └─StreamSource { source: t1, columns: [ts1, a1, b1, _row_id] } - └─StreamExchange { dist: HashShard(ts2, a2) } - └─StreamRowIdGen { row_id_index: 3 } - └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts2, expr: (ts2 - '00:00:01':Interval) }], output_watermarks: [ts2] } - └─StreamSource { source: t2, columns: [ts2, a2, b2, _row_id] } + └─StreamExchange { dist: HashShard(ts1, a1, _row_id, _row_id) } + └─StreamHashJoin [window, append_only] { type: Inner, predicate: ts1 = ts2 AND a1 = a2, output_watermarks: [ts1, ts2], output: [ts1, a1, b1, ts2, a2, b2, _row_id, _row_id] } + ├─StreamExchange { dist: HashShard(ts1, a1) } + │ └─StreamRowIdGen { row_id_index: 3 } + │ └─StreamWatermarkFilter { watermark_descs: [Desc { 
column: ts1, expr: (ts1 - '00:00:01':Interval) }], output_watermarks: [ts1] } + │ └─StreamSource { source: t1, columns: [ts1, a1, b1, _row_id] } + └─StreamExchange { dist: HashShard(ts2, a2) } + └─StreamRowIdGen { row_id_index: 3 } + └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts2, expr: (ts2 - '00:00:01':Interval) }], output_watermarks: [ts2] } + └─StreamSource { source: t2, columns: [ts2, a2, b2, _row_id] } - name: Window join expression reorder sql: | create source t1 (ts1 timestamp with time zone, a1 int, b1 int, watermark for ts1 as ts1 - INTERVAL '1' SECOND) with ( @@ -34,12 +35,13 @@ select * from t1, t2 where a1 = a2 and ts1 = ts2; stream_plan: |- StreamMaterialize { columns: [ts1, a1, b1, ts2, a2, b2, _row_id(hidden), _row_id#1(hidden)], stream_key: [_row_id, _row_id#1, a1, ts1], pk_columns: [_row_id, _row_id#1, a1, ts1], pk_conflict: NoCheck, watermark_columns: [ts1, ts2] } - └─StreamHashJoin [window, append_only] { type: Inner, predicate: ts1 = ts2 AND a1 = a2, output_watermarks: [ts1, ts2], output: [ts1, a1, b1, ts2, a2, b2, _row_id, _row_id] } - ├─StreamExchange { dist: HashShard(ts1, a1) } - │ └─StreamRowIdGen { row_id_index: 3 } - │ └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts1, expr: (ts1 - '00:00:01':Interval) }], output_watermarks: [ts1] } - │ └─StreamSource { source: t1, columns: [ts1, a1, b1, _row_id] } - └─StreamExchange { dist: HashShard(ts2, a2) } - └─StreamRowIdGen { row_id_index: 3 } - └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts2, expr: (ts2 - '00:00:01':Interval) }], output_watermarks: [ts2] } - └─StreamSource { source: t2, columns: [ts2, a2, b2, _row_id] } + └─StreamExchange { dist: HashShard(ts1, a1, _row_id, _row_id) } + └─StreamHashJoin [window, append_only] { type: Inner, predicate: ts1 = ts2 AND a1 = a2, output_watermarks: [ts1, ts2], output: [ts1, a1, b1, ts2, a2, b2, _row_id, _row_id] } + ├─StreamExchange { dist: HashShard(ts1, a1) } + │ └─StreamRowIdGen { row_id_index: 3 } + │ 
└─StreamWatermarkFilter { watermark_descs: [Desc { column: ts1, expr: (ts1 - '00:00:01':Interval) }], output_watermarks: [ts1] } + │ └─StreamSource { source: t1, columns: [ts1, a1, b1, _row_id] } + └─StreamExchange { dist: HashShard(ts2, a2) } + └─StreamRowIdGen { row_id_index: 3 } + └─StreamWatermarkFilter { watermark_descs: [Desc { column: ts2, expr: (ts2 - '00:00:01':Interval) }], output_watermarks: [ts2] } + └─StreamSource { source: t2, columns: [ts2, a2, b2, _row_id] } diff --git a/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml b/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml new file mode 100644 index 0000000000000..867855fe7b44d --- /dev/null +++ b/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml @@ -0,0 +1,210 @@ +# This file is automatically generated. See `src/frontend/planner_test/README.md` for more information. +- sql: | + select * from unnest(array[1,2,3]) WITH ORDINALITY; + batch_plan: |- + BatchProject { exprs: [Unnest(ARRAY[1, 2, 3]:List(Int32)), (projected_row_id + 1:Int64) as $expr1] } + └─BatchProjectSet { select_list: [Unnest(ARRAY[1, 2, 3]:List(Int32))] } + └─BatchValues { rows: [[]] } + stream_plan: |- + StreamMaterialize { columns: [unnest, ordinality, _row_id(hidden), projected_row_id(hidden)], stream_key: [_row_id, projected_row_id], pk_columns: [_row_id, projected_row_id], pk_conflict: NoCheck } + └─StreamProject { exprs: [Unnest(ARRAY[1, 2, 3]:List(Int32)), (projected_row_id + 1:Int64) as $expr1, _row_id, projected_row_id] } + └─StreamProjectSet { select_list: [Unnest(ARRAY[1, 2, 3]:List(Int32)), $0] } + └─StreamValues { rows: [[0:Int64]] } +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY; + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, 
output: all } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, arr, unnest, ordinality, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id, arr], pk_columns: [t._row_id, projected_row_id, arr], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY as foo; + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], 
dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, arr, foo, ordinality, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id, arr], pk_columns: [t._row_id, projected_row_id, arr], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY as foo(a); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, arr, a, ordinality, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id, arr], pk_columns: [t._row_id, 
projected_row_id, arr], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY as foo(a,ord); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, arr, a, ord, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id, arr], pk_columns: [t._row_id, projected_row_id, arr], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, 
t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- name: use alias columns explicitlity + sql: | + create table t(x int , arr int[]); + select x, arr, a, ord from t cross join unnest(arr) WITH ORDINALITY as foo(a,ord); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, arr, a, ord, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id, arr], pk_columns: [t._row_id, projected_row_id, arr], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: 
[$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY as foo(a,ord,bar); + binder_error: 'Bind error: table "foo" has 2 columns available but 3 column aliases specified' +- sql: | + create table t(x int , arr int[]); + select * from t cross join unnest(arr) WITH ORDINALITY, unnest(arr) WITH ORDINALITY AS unnest_2(arr_2,ordinality_2); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, Unnest($0), (projected_row_id + 1:Int64) as $expr2] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + ├─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: all } + │ ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + │ └─BatchProjectSet { select_list: [$0, Unnest($0)] } + │ └─BatchHashAgg { group_key: [t.arr], aggs: [] } + │ └─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.arr] } + ├─BatchExchange { order: [], dist: HashShard(t.arr) } + │ └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + └─BatchProjectSet { select_list: [$0, Unnest($0)] } + └─BatchHashAgg { group_key: [t.arr], aggs: [] } + └─BatchExchange { order: [], dist: HashShard(t.arr) } + └─BatchScan { table: t, columns: [t.arr], distribution: SomeShard } + stream_plan: |- 
+ StreamMaterialize { columns: [x, arr, unnest, ordinality, arr_2, ordinality_2, t._row_id(hidden), projected_row_id(hidden), projected_row_id#1(hidden)], stream_key: [t._row_id, projected_row_id, arr, projected_row_id#1], pk_columns: [t._row_id, projected_row_id, arr, projected_row_id#1], pk_conflict: NoCheck } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), $expr1, Unnest($0), (projected_row_id + 1:Int64) as $expr2, t._row_id, projected_row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, Unnest($0), $expr1, projected_row_id, t.arr, Unnest($0), t._row_id, projected_row_id] } + ├─StreamShare { id: 8 } + │ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + │ └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + │ ├─StreamExchange { dist: HashShard(t.arr) } + │ │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamProjectSet { select_list: [$0, Unnest($0)] } + │ └─StreamProject { exprs: [t.arr] } + │ └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + │ └─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamShare { id: 8 } + └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } + └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } + ├─StreamExchange { dist: HashShard(t.arr) } + │ └─StreamTableScan { table: t, columns: [t.x, 
t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamProjectSet { select_list: [$0, Unnest($0)] } + └─StreamProject { exprs: [t.arr] } + └─StreamHashAgg { group_key: [t.arr], aggs: [count] } + └─StreamExchange { dist: HashShard(t.arr) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } +- sql: | + select * from abs(1) WITH ORDINALITY; + batch_plan: 'BatchValues { rows: [[1:Int32, 1:Int64]] }' + stream_plan: |- + StreamMaterialize { columns: [abs, ordinality, _row_id(hidden)], stream_key: [_row_id], pk_columns: [_row_id], pk_conflict: NoCheck } + └─StreamValues { rows: [[Abs(1:Int32), 1:Int64, 0:Int64]] } +- sql: | + create table t(x int , arr int[]); + select * from t, abs(x) WITH ORDINALITY; + batch_plan: |- + BatchNestedLoopJoin { type: Inner, predicate: true, output: all } + ├─BatchExchange { order: [], dist: Single } + │ └─BatchHashJoin { type: Inner, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, t.arr] } + │ ├─BatchExchange { order: [], dist: HashShard(t.x) } + │ │ └─BatchScan { table: t, columns: [t.x, t.arr], distribution: SomeShard } + │ └─BatchHashAgg { group_key: [t.x], aggs: [] } + │ └─BatchExchange { order: [], dist: HashShard(t.x) } + │ └─BatchScan { table: t, columns: [t.x], distribution: SomeShard } + └─BatchValues { rows: [[Abs(CorrelatedInputRef { index: 0, correlated_id: 1 }), 1:Int64]] } + stream_error: |- + Not supported: streaming nested-loop join + HINT: The non-equal join in the query requires a nested-loop join executor, which could be very expensive to run. Consider rewriting the query to use dynamic filter as a substitute if possible. 
+ See also: https://github.com/risingwavelabs/rfcs/blob/main/rfcs/0033-dynamic-filter.md diff --git a/src/frontend/src/binder/expr/binary_op.rs b/src/frontend/src/binder/expr/binary_op.rs index 00b4a9f64a9cb..f7c8a86144fc9 100644 --- a/src/frontend/src/binder/expr/binary_op.rs +++ b/src/frontend/src/binder/expr/binary_op.rs @@ -108,11 +108,12 @@ impl Binder { ExprType::ConcatOp } - // jsonb, bytea (and varbit, tsvector, tsquery) - (Some(t @ DataType::Jsonb), Some(DataType::Jsonb)) - | (Some(t @ DataType::Jsonb), None) - | (None, Some(t @ DataType::Jsonb)) - | (Some(t @ DataType::Bytea), Some(DataType::Bytea)) + (Some(DataType::Jsonb), Some(DataType::Jsonb)) + | (Some(DataType::Jsonb), None) + | (None, Some(DataType::Jsonb)) => ExprType::JsonbCat, + + // bytea (and varbit, tsvector, tsquery) + (Some(t @ DataType::Bytea), Some(DataType::Bytea)) | (Some(t @ DataType::Bytea), None) | (None, Some(t @ DataType::Bytea)) => { return Err(ErrorCode::BindError(format!( diff --git a/src/frontend/src/binder/expr/function.rs b/src/frontend/src/binder/expr/function.rs index 6bfa4883c6a5e..18438b28c0a98 100644 --- a/src/frontend/src/binder/expr/function.rs +++ b/src/frontend/src/binder/expr/function.rs @@ -20,14 +20,13 @@ use std::sync::LazyLock; use bk_tree::{metrics, BKTree}; use itertools::Itertools; use risingwave_common::array::ListValue; -use risingwave_common::catalog::PG_CATALOG_SCHEMA_NAME; +use risingwave_common::catalog::{INFORMATION_SCHEMA_SCHEMA_NAME, PG_CATALOG_SCHEMA_NAME}; use risingwave_common::error::{ErrorCode, Result, RwError}; -use risingwave_common::format::{Formatter, FormatterNode, SpecifierType}; use risingwave_common::session_config::USER_NAME_WILD_CARD; use risingwave_common::types::{DataType, ScalarImpl, Timestamptz}; use risingwave_common::{GIT_SHA, RW_VERSION}; -use risingwave_expr::agg::{agg_kinds, AggKind}; -use risingwave_expr::function::window::{ +use risingwave_expr::aggregate::{agg_kinds, AggKind}; +use risingwave_expr::window_function::{ Frame, 
FrameBound, FrameBounds, FrameExclusion, WindowFuncKind, }; use risingwave_sqlparser::ast::{ @@ -60,12 +59,27 @@ impl Binder { [schema, name] => { let schema_name = schema.real_value(); if schema_name == PG_CATALOG_SCHEMA_NAME { + // pg_catalog is always effectively part of the search path, so we can always bind the function. + // Ref: https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-CATALOG name.real_value() + } else if schema_name == INFORMATION_SCHEMA_SCHEMA_NAME { + // definition of information_schema: https://github.com/postgres/postgres/blob/e0b2eed047df9045664da6f724cb42c10f8b12f0/src/backend/catalog/information_schema.sql + // + // FIXME: handle schema correctly, so that the functions are hidden if the schema is not in the search path. + let function_name = name.real_value(); + if function_name != "_pg_expandarray" { + return Err(ErrorCode::NotImplemented( + format!("Unsupported function name under schema: {}", schema_name), + 12422.into(), + ) + .into()); + } + function_name } else { - return Err(ErrorCode::BindError(format!( - "Unsupported function name under schema: {}", - schema_name - )) + return Err(ErrorCode::NotImplemented( + format!("Unsupported function name under schema: {}", schema_name), + 12422.into(), + ) .into()); } } @@ -139,7 +153,7 @@ impl Binder { } // user defined function - // TODO: resolve schema name + // TODO: resolve schema name https://github.com/risingwavelabs/risingwave/issues/12422 if let Ok(schema) = self.first_valid_schema() && let Some(func) = schema.get_function_by_name_args( &function_name, @@ -755,7 +769,7 @@ impl Binder { rewrite(ExprType::ConcatWs, Binder::rewrite_concat_to_concat_ws), ), ("concat_ws", raw_call(ExprType::ConcatWs)), - ("format", rewrite(ExprType::ConcatWs, Binder::rewrite_format_to_concat_ws)), + ("format", raw_call(ExprType::Format)), ("translate", raw_call(ExprType::Translate)), ("split_part", raw_call(ExprType::SplitPart)), ("char_length", raw_call(ExprType::CharLength)), @@ 
-767,6 +781,7 @@ impl Binder { ("regexp_match", raw_call(ExprType::RegexpMatch)), ("regexp_replace", raw_call(ExprType::RegexpReplace)), ("regexp_count", raw_call(ExprType::RegexpCount)), + ("regexp_split_to_array", raw_call(ExprType::RegexpSplitToArray)), ("chr", raw_call(ExprType::Chr)), ("starts_with", raw_call(ExprType::StartsWith)), ("initcap", raw_call(ExprType::Initcap)), @@ -801,6 +816,7 @@ impl Binder { ("array_remove", raw_call(ExprType::ArrayRemove)), ("array_replace", raw_call(ExprType::ArrayReplace)), ("array_max", raw_call(ExprType::ArrayMax)), + ("array_sum", raw_call(ExprType::ArraySum)), ("array_position", raw_call(ExprType::ArrayPosition)), ("array_positions", raw_call(ExprType::ArrayPositions)), ("trim_array", raw_call(ExprType::TrimArray)), @@ -850,6 +866,9 @@ impl Binder { ("jsonb_array_length", raw_call(ExprType::JsonbArrayLength)), // Functions that return a constant value ("pi", pi()), + // greatest and least + ("greatest", raw_call(ExprType::Greatest)), + ("least", raw_call(ExprType::Least)), // System information operations. ( "pg_typeof", @@ -1127,7 +1146,7 @@ impl Binder { // TODO: really implement them. // https://www.postgresql.org/docs/9.5/functions-info.html#FUNCTIONS-INFO-COMMENT-TABLE // WARN: Hacked in [`Binder::bind_function`]!!! 
- ("col_description", raw_literal(ExprImpl::literal_varchar("".to_string()))), + ("col_description", raw_call(ExprType::ColDescription)), ("obj_description", raw_literal(ExprImpl::literal_varchar("".to_string()))), ("shobj_description", raw_literal(ExprImpl::literal_varchar("".to_string()))), ("pg_is_in_recovery", raw_literal(ExprImpl::literal_bool(false))), @@ -1142,7 +1161,17 @@ impl Binder { // non-deterministic ("now", now()), ("current_timestamp", now()), - ("proctime", proctime()) + ("proctime", proctime()), + ("pg_sleep", raw_call(ExprType::PgSleep)), + ("pg_sleep_for", raw_call(ExprType::PgSleepFor)), + // TODO: implement pg_sleep_until + // ("pg_sleep_until", raw_call(ExprType::PgSleepUntil)), + + // cast functions + // only functions required by the existing PostgreSQL tool are implemented + ("date", guard_by_len(1, raw(|_binder, inputs| { + inputs[0].clone().cast_explicit(DataType::Date).map_err(Into::into) + }))), ] .into_iter() .collect() @@ -1199,75 +1228,6 @@ impl Binder { } } - fn rewrite_format_to_concat_ws(inputs: Vec) -> Result> { - let Some((format_expr, args)) = inputs.split_first() else { - return Err(ErrorCode::BindError( - "Function `format` takes at least 1 arguments (0 given)".to_string(), - ) - .into()); - }; - let ExprImpl::Literal(expr_literal) = format_expr else { - return Err(ErrorCode::BindError( - "Function `format` takes a literal string as the first argument".to_string(), - ) - .into()); - }; - let Some(ScalarImpl::Utf8(format_str)) = expr_literal.get_data() else { - return Err(ErrorCode::BindError( - "Function `format` takes a literal string as the first argument".to_string(), - ) - .into()); - }; - let formatter = Formatter::parse(format_str) - .map_err(|err| -> RwError { ErrorCode::BindError(err.to_string()).into() })?; - - let specifier_count = formatter - .nodes() - .iter() - .filter(|f_node| matches!(f_node, FormatterNode::Specifier(_))) - .count(); - if specifier_count != args.len() { - return 
Err(ErrorCode::BindError(format!( - "Function `format` required {} arguments based on the `formatstr`, but {} found.", - specifier_count, - args.len() - )) - .into()); - } - - // iter the args. - let mut j = 0; - let new_args = [Ok(ExprImpl::literal_varchar("".to_string()))] - .into_iter() - .chain(formatter.nodes().iter().map(move |f_node| -> Result<_> { - let new_arg = match f_node { - FormatterNode::Specifier(sp) => { - // We've checked the count. - let arg = &args[j]; - j += 1; - match sp.ty { - SpecifierType::SimpleString => arg.clone(), - SpecifierType::SqlIdentifier => { - FunctionCall::new(ExprType::QuoteIdent, vec![arg.clone()])?.into() - } - SpecifierType::SqlLiteral => { - return Err::<_, RwError>( - ErrorCode::BindError( - "unsupported specifier type 'L'".to_string(), - ) - .into(), - ) - } - } - } - FormatterNode::Literal(literal) => ExprImpl::literal_varchar(literal.clone()), - }; - Ok(new_arg) - })) - .try_collect()?; - Ok(new_args) - } - /// Make sure inputs only have 2 value and rewrite the arguments. /// Nullif(expr1,expr2) -> Case(Equal(expr1 = expr2),null,expr1). 
fn rewrite_nullif_to_case_when(inputs: Vec) -> Result> { diff --git a/src/frontend/src/binder/expr/mod.rs b/src/frontend/src/binder/expr/mod.rs index 7b80dcc3c5495..6da590c2d315d 100644 --- a/src/frontend/src/binder/expr/mod.rs +++ b/src/frontend/src/binder/expr/mod.rs @@ -523,32 +523,30 @@ impl Binder { // TODO: Add generic expr support when needed AstDataType::Regclass => { let input = self.bind_expr_inner(expr)?; - let class_name = match &input { - ExprImpl::Literal(literal) - if literal.return_type() == DataType::Varchar - && let Some(scalar) = literal.get_data() => - { - match scalar { - risingwave_common::types::ScalarImpl::Utf8(s) => s, - _ => { - return Err(ErrorCode::BindError( - "Unsupported input type".to_string(), - ) - .into()) - } - } - } - ExprImpl::Literal(literal) if literal.return_type().is_int() => { - return Ok(ExprImpl::Literal(literal.clone())) - } - _ => { - return Err( - ErrorCode::BindError("Unsupported input type".to_string()).into() - ) - } - }; - self.resolve_regclass(class_name) - .map(|id| ExprImpl::literal_int(id as i32)) + match input.return_type() { + DataType::Varchar => Ok(ExprImpl::FunctionCall(Box::new( + FunctionCall::new_unchecked( + ExprType::CastRegclass, + vec![input], + DataType::Int32, + ), + ))), + DataType::Int32 => Ok(input), + dt if dt.is_int() => Ok(input.cast_explicit(DataType::Int32)?), + _ => Err(ErrorCode::BindError("Unsupported input type".to_string()).into()), + } + } + AstDataType::Regproc => { + let lhs = self.bind_expr_inner(expr)?; + let lhs_ty = lhs.return_type(); + if lhs_ty == DataType::Varchar { + // FIXME: Currently, we only allow VARCHAR to be casted to Regproc. + // FIXME: Check whether it's a valid proc + // FIXME: The return type should be casted to Regproc, but we don't have this type. 
+ Ok(lhs) + } else { + Err(ErrorCode::BindError(format!("Can't cast {} to regproc", lhs_ty)).into()) + } } _ => self.bind_cast_inner(expr, bind_data_type(&data_type)?), } @@ -655,6 +653,7 @@ pub fn bind_data_type(data_type: &AstDataType) -> Result { } AstDataType::Bytea => DataType::Bytea, AstDataType::Regclass + | AstDataType::Regproc | AstDataType::Uuid | AstDataType::Custom(_) | AstDataType::Decimal(_, _) diff --git a/src/frontend/src/binder/mod.rs b/src/frontend/src/binder/mod.rs index 974730cd16237..f1038f9bf5943 100644 --- a/src/frontend/src/binder/mod.rs +++ b/src/frontend/src/binder/mod.rs @@ -363,6 +363,13 @@ impl Binder { } } +/// The column name stored in [`BindContext`] for a column without an alias. +pub const UNNAMED_COLUMN: &str = "?column?"; +/// The table name stored in [`BindContext`] for a subquery without an alias. +const UNNAMED_SUBQUERY: &str = "?subquery?"; +/// The table name stored in [`BindContext`] for a column group. +const COLUMN_GROUP_PREFIX: &str = "?column_group_id?"; + #[cfg(test)] pub mod test_utils { use risingwave_common::types::DataType; @@ -380,10 +387,3 @@ pub mod test_utils { Binder::new_with_param_types(&SessionImpl::mock(), param_types) } } - -/// The column name stored in [`BindContext`] for a column without an alias. -pub const UNNAMED_COLUMN: &str = "?column?"; -/// The table name stored in [`BindContext`] for a subquery without an alias. -const UNNAMED_SUBQUERY: &str = "?subquery?"; -/// The table name stored in [`BindContext`] for a column group. -const COLUMN_GROUP_PREFIX: &str = "?column_group_id?"; diff --git a/src/frontend/src/binder/query.rs b/src/frontend/src/binder/query.rs index 5f85031c9833b..a3b78343c6041 100644 --- a/src/frontend/src/binder/query.rs +++ b/src/frontend/src/binder/query.rs @@ -92,9 +92,17 @@ impl BoundQuery { depth: Depth, correlated_id: CorrelatedId, ) -> Vec { - // TODO: collect `correlated_input_ref` in `extra_order_exprs`. 
- self.body - .collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id) + let mut correlated_indices = vec![]; + + correlated_indices.extend( + self.body + .collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id), + ); + + correlated_indices.extend(self.extra_order_exprs.iter_mut().flat_map(|expr| { + expr.collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id) + })); + correlated_indices } /// Simple `VALUES` without other clauses. @@ -254,7 +262,7 @@ impl Binder { Ok(index) if 1 <= index && index <= visible_output_num => index - 1, _ => { return Err(ErrorCode::InvalidInputSyntax(format!( - "Invalid value in ORDER BY: {}", + "Invalid ordinal number in ORDER BY: {}", number )) .into()) diff --git a/src/frontend/src/binder/relation/join.rs b/src/frontend/src/binder/relation/join.rs index 3916289e86193..eb4ce96f9ab3f 100644 --- a/src/frontend/src/binder/relation/join.rs +++ b/src/frontend/src/binder/relation/join.rs @@ -56,7 +56,14 @@ impl Binder { self.push_lateral_context(); let right = self.bind_table_with_joins(t.clone())?; self.pop_and_merge_lateral_context()?; - root = if let Relation::Subquery(subquery) = &right && subquery.lateral { + + let is_lateral = match &right { + Relation::Subquery(subquery) if subquery.lateral => true, + Relation::TableFunction { .. } => true, + _ => false, + }; + + root = if is_lateral { Relation::Apply(Box::new(BoundJoin { join_type: JoinType::Inner, left: root, @@ -100,14 +107,20 @@ impl Binder { right = self.bind_table_factor(join.relation.clone())?; (cond, _) = self.bind_join_constraint(constraint, None, join_type)?; } - root = if let Relation::Subquery(subquery) = &right && subquery.lateral { + + let is_lateral = match &right { + Relation::Subquery(subquery) if subquery.lateral => true, + Relation::TableFunction { .. 
} => true, + _ => false, + }; + + root = if is_lateral { match join_type { - JoinType::Inner | JoinType::LeftOuter => {}, + JoinType::Inner | JoinType::LeftOuter => {} _ => { return Err(ErrorCode::InvalidInputSyntax("The combining JOIN type must be INNER or LEFT for a LATERAL reference.".to_string()) .into()); - - } + } } Relation::Apply(Box::new(BoundJoin { @@ -123,7 +136,7 @@ impl Binder { right, cond, })) - } + }; } Ok(root) diff --git a/src/frontend/src/binder/relation/mod.rs b/src/frontend/src/binder/relation/mod.rs index e92504fbb704b..a6a1a8d2b02f2 100644 --- a/src/frontend/src/binder/relation/mod.rs +++ b/src/frontend/src/binder/relation/mod.rs @@ -15,6 +15,7 @@ use std::collections::hash_map::Entry; use std::ops::Deref; +use itertools::{EitherOrBoth, Itertools}; use risingwave_common::catalog::{Field, TableId, DEFAULT_SCHEMA_NAME}; use risingwave_common::error::{internal_error, ErrorCode, Result, RwError}; use risingwave_sqlparser::ast::{ @@ -55,7 +56,11 @@ pub enum Relation { Join(Box), Apply(Box), WindowTableFunction(Box), - TableFunction(ExprImpl), + /// Table function or scalar function. + TableFunction { + expr: ExprImpl, + with_ordinality: bool, + }, Watermark(Box), Share(Box), } @@ -69,7 +74,9 @@ impl RewriteExprsRecursive for Relation { Relation::WindowTableFunction(inner) => inner.rewrite_exprs_recursive(rewriter), Relation::Watermark(inner) => inner.rewrite_exprs_recursive(rewriter), Relation::Share(inner) => inner.rewrite_exprs_recursive(rewriter), - Relation::TableFunction(inner) => *inner = rewriter.rewrite_expr(inner.take()), + Relation::TableFunction { expr: inner, .. 
} => { + *inner = rewriter.rewrite_expr(inner.take()) + } _ => {} } } @@ -113,8 +120,11 @@ impl Relation { ); correlated_indices } - Relation::TableFunction(table_function) => table_function - .collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id), + Relation::TableFunction { + expr: table_function, + with_ordinality: _, + } => table_function + .collect_correlated_indices_by_depth_and_assign_id(depth + 1, correlated_id), _ => vec![], } } @@ -328,11 +338,13 @@ impl Binder { if let Some(from_alias) = alias { original_alias.name = from_alias.name; - let mut alias_iter = from_alias.columns.into_iter(); original_alias.columns = original_alias .columns .into_iter() - .map(|ident| alias_iter.next().unwrap_or(ident)) + .zip_longest( + from_alias.columns + ) + .map(EitherOrBoth::into_right) .collect(); } @@ -437,8 +449,16 @@ impl Binder { alias, for_system_time_as_of_proctime, } => self.bind_relation_by_name(name, alias, for_system_time_as_of_proctime), - TableFactor::TableFunction { name, alias, args } => { - self.bind_table_function(name, alias, args) + TableFactor::TableFunction { + name, + alias, + args, + with_ordinality, + } => { + self.try_mark_lateral_as_visible(); + let result = self.bind_table_function(name, alias, args, with_ordinality); + self.try_mark_lateral_as_invisible(); + result } TableFactor::Derived { lateral, diff --git a/src/frontend/src/binder/relation/table_function.rs b/src/frontend/src/binder/relation/table_function.rs index 1b53364e2dab5..988ea0561a860 100644 --- a/src/frontend/src/binder/relation/table_function.rs +++ b/src/frontend/src/binder/relation/table_function.rs @@ -18,6 +18,7 @@ use itertools::Itertools; use risingwave_common::catalog::{ Field, Schema, PG_CATALOG_SCHEMA_NAME, RW_INTERNAL_TABLE_FUNCTION_NAME, }; +use risingwave_common::error::ErrorCode; use risingwave_common::types::DataType; use risingwave_sqlparser::ast::{Function, FunctionArg, ObjectName, TableAlias}; @@ -34,16 +35,29 @@ impl Binder { /// /// Besides 
[`crate::expr::TableFunction`] expr, it can also be other things like window table /// functions, or scalar functions. + /// + /// `with_ordinality` is only supported for the `TableFunction` case now. pub(super) fn bind_table_function( &mut self, name: ObjectName, alias: Option, args: Vec, + with_ordinality: bool, ) -> Result { let func_name = &name.0[0].real_value(); // internal/system table functions { if func_name.eq_ignore_ascii_case(RW_INTERNAL_TABLE_FUNCTION_NAME) { + if with_ordinality { + return Err(ErrorCode::NotImplemented( + format!( + "WITH ORDINALITY for internal/system table function {}", + func_name + ), + None.into(), + ) + .into()); + } return self.bind_internal_table(args, alias); } if func_name.eq_ignore_ascii_case(PG_GET_KEYWORDS_FUNC_NAME) @@ -51,6 +65,16 @@ impl Binder { format!("{}.{}", PG_CATALOG_SCHEMA_NAME, PG_GET_KEYWORDS_FUNC_NAME).as_str(), ) { + if with_ordinality { + return Err(ErrorCode::NotImplemented( + format!( + "WITH ORDINALITY for internal/system table function {}", + func_name + ), + None.into(), + ) + .into()); + } return self.bind_relation_by_name_inner( Some(PG_CATALOG_SCHEMA_NAME), PG_KEYWORDS_TABLE_NAME, @@ -61,17 +85,31 @@ impl Binder { } // window table functions (tumble/hop) if let Ok(kind) = WindowTableFunctionKind::from_str(func_name) { + if with_ordinality { + return Err(ErrorCode::InvalidInputSyntax(format!( + "WITH ORDINALITY for window table function {}", + func_name + )) + .into()); + } return Ok(Relation::WindowTableFunction(Box::new( self.bind_window_table_function(alias, kind, args)?, ))); } // watermark if is_watermark_func(func_name) { + if with_ordinality { + return Err(ErrorCode::InvalidInputSyntax( + "WITH ORDINALITY for watermark".to_string(), + ) + .into()); + } return Ok(Relation::Watermark(Box::new( self.bind_watermark(alias, args)?, ))); }; + self.push_context(); let mut clause = Some(Clause::From); std::mem::swap(&mut self.context.clause, &mut clause); let func = self.bind_function(Function { @@ 
-82,16 +120,19 @@ impl Binder { order_by: vec![], filter: None, within_group: None, - })?; + }); self.context.clause = clause; + self.pop_context()?; + let func = func?; - let columns = if let DataType::Struct(s) = func.return_type() { - // If the table function returns a struct, it's fields can be accessed just - // like a table's columns. + // bool indicates if the field is hidden + let mut columns = if let DataType::Struct(s) = func.return_type() { + // If the table function returns a struct, it will be flattened into multiple columns. let schema = Schema::from(&s); schema.fields.into_iter().map(|f| (false, f)).collect_vec() } else { - // If there is an table alias, we should use the alias as the table function's + // If there is an table alias (and it doesn't return a struct), + // we should use the alias as the table function's // column name. If column aliases are also provided, they // are handled in bind_table_to_context. // @@ -109,9 +150,15 @@ impl Binder { }; vec![(false, Field::with_name(func.return_type(), col_name))] }; + if with_ordinality { + columns.push((false, Field::with_name(DataType::Int64, "ordinality"))); + } self.bind_table_to_context(columns, func_name.clone(), alias)?; - Ok(Relation::TableFunction(func)) + Ok(Relation::TableFunction { + expr: func, + with_ordinality, + }) } } diff --git a/src/frontend/src/binder/relation/table_or_source.rs b/src/frontend/src/binder/relation/table_or_source.rs index 480fcd20faf36..b05b5db42b300 100644 --- a/src/frontend/src/binder/relation/table_or_source.rs +++ b/src/frontend/src/binder/relation/table_or_source.rs @@ -21,7 +21,6 @@ use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::session_config::USER_NAME_WILD_CARD; use risingwave_sqlparser::ast::{Statement, TableAlias}; use risingwave_sqlparser::parser::Parser; -use risingwave_sqlparser::tokenizer::{Token, Tokenizer}; use super::BoundShare; use crate::binder::relation::BoundSubquery; @@ -377,44 +376,4 @@ impl Binder { 
Ok(table) } - - pub(crate) fn resolve_regclass(&self, class_name: &str) -> Result { - let obj = Self::parse_object_name(class_name)?; - - if obj.0.len() == 1 { - let class_name = obj.0[0].real_value(); - let schema_path = SchemaPath::Path(&self.search_path, &self.auth_context.user_name); - Ok(self - .catalog - .get_id_by_class_name(&self.db_name, schema_path, &class_name)?) - } else { - let schema = obj.0[0].real_value(); - let class_name = obj.0[1].real_value(); - let schema_path = SchemaPath::Name(&schema); - Ok(self - .catalog - .get_id_by_class_name(&self.db_name, schema_path, &class_name)?) - } - } - - /// Attempt to parse the value of a varchar Literal into an - /// [`ObjectName`](risingwave_sqlparser::ast::ObjectName). - fn parse_object_name(name: &str) -> Result { - // We use the full parser here because this function needs to accept every legal way - // of identifying an object in PG SQL as a valid value for the varchar - // literal. For example: 'foo', 'public.foo', '"my table"', and - // '"my schema".foo' must all work as values passed pg_table_size. - let mut tokenizer = Tokenizer::new(name); - let tokens = tokenizer - .tokenize_with_location() - .map_err(|e| ErrorCode::BindError(e.to_string()))?; - let mut parser = Parser::new(tokens); - let object = parser - .parse_object_name() - .map_err(|e| ErrorCode::BindError(e.to_string()))?; - if parser.next_token().token != Token::EOF { - Err(ErrorCode::BindError("Invalid name syntax".to_string()))? 
- } - Ok(object) - } } diff --git a/src/frontend/src/binder/select.rs b/src/frontend/src/binder/select.rs index eb2cacb6c65ab..48c4290ee7e05 100644 --- a/src/frontend/src/binder/select.rs +++ b/src/frontend/src/binder/select.rs @@ -20,10 +20,10 @@ use risingwave_common::catalog::{Field, Schema, PG_CATALOG_SCHEMA_NAME, RW_CATAL use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; use risingwave_sqlparser::ast::{ BinaryOperator, DataType as AstDataType, Distinct, Expr, Ident, Join, JoinConstraint, - JoinOperator, ObjectName, Select, SelectItem, TableFactor, TableWithJoins, + JoinOperator, ObjectName, Select, SelectItem, TableFactor, TableWithJoins, Value, }; use super::bind_context::{Clause, ColumnBinding}; @@ -207,9 +207,10 @@ impl Binder { // Bind SELECT clause. let (select_items, aliases) = self.bind_select_list(select.projection)?; + let out_name_to_index = Self::build_name_to_index(aliases.iter().filter_map(Clone::clone)); // Bind DISTINCT ON. - let distinct = self.bind_distinct_on(select.distinct)?; + let distinct = self.bind_distinct_on(select.distinct, &out_name_to_index, &select_items)?; // Bind WHERE clause. self.context.clause = Some(Clause::Where); @@ -223,7 +224,6 @@ impl Binder { self.context.clause = None; // Bind GROUP BY clause. 
- let out_name_to_index = Self::build_name_to_index(aliases.iter().filter_map(Clone::clone)); self.context.clause = Some(Clause::GroupBy); // Only support one grouping item in group by clause @@ -360,6 +360,7 @@ impl Binder { } } } + assert_eq!(select_list.len(), aliases.len()); Ok((select_list, aliases)) } @@ -709,9 +710,7 @@ impl Binder { .expect("ExprImpl value is a Literal but cannot get ref to data") .as_utf8(); self.bind_cast( - Expr::Value(risingwave_sqlparser::ast::Value::SingleQuotedString( - table_name.to_string(), - )), + Expr::Value(Value::SingleQuotedString(table_name.to_string())), AstDataType::Regclass, ) } @@ -769,14 +768,67 @@ impl Binder { .unzip() } - fn bind_distinct_on(&mut self, distinct: Distinct) -> Result { + /// Bind `DISTINCT` clause in a [`Select`]. + /// Note that for `DISTINCT ON`, each expression is interpreted in the same way as `ORDER BY` + /// expression, which means it will be bound in the following order: + /// + /// * as an output-column name (can use aliases) + /// * as an index (from 1) of an output column + /// * as an arbitrary expression (cannot use aliases) + /// + /// See also the `bind_order_by_expr_in_query` method. + /// + /// # Arguments + /// + /// * `name_to_index` - output column name -> index. Ambiguous (duplicate) output names are + /// marked with `usize::MAX`. 
+ fn bind_distinct_on( + &mut self, + distinct: Distinct, + name_to_index: &HashMap, + select_items: &[ExprImpl], + ) -> Result { Ok(match distinct { Distinct::All => BoundDistinct::All, Distinct::Distinct => BoundDistinct::Distinct, Distinct::DistinctOn(exprs) => { let mut bound_exprs = vec![]; for expr in exprs { - bound_exprs.push(self.bind_expr(expr)?); + let expr_impl = match expr { + Expr::Identifier(name) if let Some(index) = name_to_index.get(&name.real_value()) => { + match *index { + usize::MAX => { + return Err(ErrorCode::BindError(format!( + "DISTINCT ON \"{}\" is ambiguous", + name.real_value() + )) + .into()) + } + _ => { + InputRef::new(*index, select_items[*index].return_type()).into() + } + } + } + Expr::Value(Value::Number(number)) => { + match number.parse::() { + Ok(index) if 1 <= index && index <= select_items.len() => { + let idx_from_0 = index - 1; + InputRef::new(idx_from_0, select_items[idx_from_0].return_type()).into() + } + _ => { + return Err(ErrorCode::InvalidInputSyntax(format!( + "Invalid ordinal number in DISTINCT ON: {}", + number + )) + .into()) + } + } + } + expr => { + self.bind_expr(expr)? + } + }; + bound_exprs.push(expr_impl); } BoundDistinct::DistinctOn(bound_exprs) } @@ -822,9 +874,7 @@ fn derive_alias(expr: &Expr) -> Option { derive_alias(&expr).or_else(|| data_type_to_alias(&data_type)) } Expr::TypedString { data_type, .. } => data_type_to_alias(&data_type), - Expr::Value(risingwave_sqlparser::ast::Value::Interval { .. }) => { - Some("interval".to_string()) - } + Expr::Value(Value::Interval { .. 
}) => Some("interval".to_string()), Expr::Row(_) => Some("row".to_string()), Expr::Array(_) => Some("array".to_string()), Expr::ArrayIndex { obj, index: _ } => derive_alias(&obj), @@ -852,6 +902,7 @@ fn data_type_to_alias(data_type: &AstDataType) -> Option { } AstDataType::Interval => "interval".to_string(), AstDataType::Regclass => "regclass".to_string(), + AstDataType::Regproc => "regproc".to_string(), AstDataType::Text => "text".to_string(), AstDataType::Bytea => "bytea".to_string(), AstDataType::Array(ty) => return data_type_to_alias(ty), diff --git a/src/frontend/src/binder/statement.rs b/src/frontend/src/binder/statement.rs index 1a94a6ce30d2f..027f78e7705a9 100644 --- a/src/frontend/src/binder/statement.rs +++ b/src/frontend/src/binder/statement.rs @@ -32,18 +32,18 @@ pub enum BoundStatement { impl BoundStatement { pub fn output_fields(&self) -> Vec { match self { - BoundStatement::Insert(i) => i.returning_schema.as_ref().map_or( - vec![Field::unnamed(risingwave_common::types::DataType::Int64)], - |s| s.fields().into(), - ), - BoundStatement::Delete(d) => d.returning_schema.as_ref().map_or( - vec![Field::unnamed(risingwave_common::types::DataType::Int64)], - |s| s.fields().into(), - ), - BoundStatement::Update(u) => u.returning_schema.as_ref().map_or( - vec![Field::unnamed(risingwave_common::types::DataType::Int64)], - |s| s.fields().into(), - ), + BoundStatement::Insert(i) => i + .returning_schema + .as_ref() + .map_or(vec![], |s| s.fields().into()), + BoundStatement::Delete(d) => d + .returning_schema + .as_ref() + .map_or(vec![], |s| s.fields().into()), + BoundStatement::Update(u) => u + .returning_schema + .as_ref() + .map_or(vec![], |s| s.fields().into()), BoundStatement::Query(q) => q.schema().fields().into(), } } diff --git a/src/frontend/src/catalog/catalog_service.rs b/src/frontend/src/catalog/catalog_service.rs index df3fce39004a1..8eb6b9e3e4485 100644 --- a/src/frontend/src/catalog/catalog_service.rs +++ 
b/src/frontend/src/catalog/catalog_service.rs @@ -21,7 +21,7 @@ use risingwave_common::error::ErrorCode::InternalError; use risingwave_common::error::{Result, RwError}; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_pb::catalog::{ - PbDatabase, PbFunction, PbIndex, PbSchema, PbSink, PbSource, PbTable, PbView, + PbCreateType, PbDatabase, PbFunction, PbIndex, PbSchema, PbSink, PbSource, PbTable, PbView, }; use risingwave_pb::ddl_service::alter_relation_name_request::Relation; use risingwave_pb::ddl_service::create_connection_request; @@ -81,6 +81,7 @@ pub trait CatalogWriter: Send + Sync { async fn replace_table( &self, + source: Option, table: PbTable, graph: StreamFragmentGraph, mapping: ColIndexMapping, @@ -190,11 +191,15 @@ impl CatalogWriter for CatalogWriterImpl { table: PbTable, graph: StreamFragmentGraph, ) -> Result<()> { + let create_type = table.get_create_type().unwrap_or(PbCreateType::Foreground); let (_, version) = self .meta_client .create_materialized_view(table, graph) .await?; - self.wait_version(version).await + if matches!(create_type, PbCreateType::Foreground) { + self.wait_version(version).await? + } + Ok(()) } async fn create_view(&self, view: PbView) -> Result<()> { @@ -229,13 +234,14 @@ impl CatalogWriter for CatalogWriterImpl { async fn replace_table( &self, + source: Option, table: PbTable, graph: StreamFragmentGraph, mapping: ColIndexMapping, ) -> Result<()> { let version = self .meta_client - .replace_table(table, graph, mapping) + .replace_table(source, table, graph, mapping) .await?; self.wait_version(version).await } diff --git a/src/frontend/src/catalog/connection_catalog.rs b/src/frontend/src/catalog/connection_catalog.rs index f049011748d17..7913d04379cd5 100644 --- a/src/frontend/src/catalog/connection_catalog.rs +++ b/src/frontend/src/catalog/connection_catalog.rs @@ -90,7 +90,8 @@ pub(crate) fn resolve_private_link_connection( if svc.get_provider()? 
== PrivateLinkProvider::Mock { return Ok(()); } - insert_privatelink_broker_rewrite_map(svc, properties).map_err(RwError::from)?; + insert_privatelink_broker_rewrite_map(properties, Some(svc), None) + .map_err(RwError::from)?; } Ok(()) } diff --git a/src/frontend/src/catalog/index_catalog.rs b/src/frontend/src/catalog/index_catalog.rs index caf0557b2fd09..ca4b4036332d3 100644 --- a/src/frontend/src/catalog/index_catalog.rs +++ b/src/frontend/src/catalog/index_catalog.rs @@ -21,7 +21,7 @@ use itertools::Itertools; use risingwave_common::catalog::IndexId; use risingwave_common::util::epoch::Epoch; use risingwave_common::util::sort_util::ColumnOrder; -use risingwave_pb::catalog::PbIndex; +use risingwave_pb::catalog::{PbIndex, PbStreamJobStatus}; use super::ColumnId; use crate::catalog::{DatabaseId, OwnedByUserCatalog, SchemaId, TableCatalog}; @@ -184,6 +184,7 @@ impl IndexCatalog { original_columns: self.original_columns.iter().map(Into::into).collect_vec(), initialized_at_epoch: self.initialized_at_epoch.map(|e| e.0), created_at_epoch: self.created_at_epoch.map(|e| e.0), + stream_job_status: PbStreamJobStatus::Creating.into(), } } diff --git a/src/frontend/src/catalog/mod.rs b/src/frontend/src/catalog/mod.rs index 211f8ff1bd07b..ad4e3ae18c954 100644 --- a/src/frontend/src/catalog/mod.rs +++ b/src/frontend/src/catalog/mod.rs @@ -37,6 +37,7 @@ pub(crate) mod system_catalog; pub(crate) mod table_catalog; pub(crate) mod view_catalog; +pub(crate) use catalog_service::CatalogReader; pub use index_catalog::IndexCatalog; pub use table_catalog::TableCatalog; diff --git a/src/frontend/src/catalog/system_catalog/information_schema/columns.rs b/src/frontend/src/catalog/system_catalog/information_schema/columns.rs index 3f8c6726f529c..136451237b52d 100644 --- a/src/frontend/src/catalog/system_catalog/information_schema/columns.rs +++ b/src/frontend/src/catalog/system_catalog/information_schema/columns.rs @@ -63,7 +63,8 @@ pub static INFORMATION_SCHEMA_COLUMNS: LazyLock = 
LazyLock::new(|| c.udt_type AS udt_name \ FROM rw_catalog.rw_columns c \ LEFT JOIN rw_catalog.rw_relations r ON c.relation_id = r.id \ - JOIN rw_catalog.rw_schemas s ON s.id = r.schema_id\ + JOIN rw_catalog.rw_schemas s ON s.id = r.schema_id \ + WHERE c.is_hidden = false\ " .to_string(), }); diff --git a/src/frontend/src/catalog/system_catalog/mod.rs b/src/frontend/src/catalog/system_catalog/mod.rs index 5d4a785bd4513..4cd271f0495b9 100644 --- a/src/frontend/src/catalog/system_catalog/mod.rs +++ b/src/frontend/src/catalog/system_catalog/mod.rs @@ -249,7 +249,7 @@ fn get_acl_items( .unwrap() .iter() .for_each(|(action, option)| { - let str = match Action::from_i32(*action).unwrap() { + let str = match Action::try_from(*action).unwrap() { Action::Select => "r", Action::Insert => "a", Action::Update => "w", @@ -403,6 +403,15 @@ prepare_sys_catalog! { { BuiltinCatalog::View(&RW_RELATIONS) }, { BuiltinCatalog::Table(&RW_COLUMNS), read_rw_columns_info }, { BuiltinCatalog::Table(&RW_TYPES), read_rw_types }, + { BuiltinCatalog::Table(&RW_HUMMOCK_PINNED_VERSIONS), read_hummock_pinned_versions await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_PINNED_SNAPSHOTS), read_hummock_pinned_snapshots await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_CURRENT_VERSION), read_hummock_current_version await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_CHECKPOINT_VERSION), read_hummock_checkpoint_version await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_SSTABLES), read_hummock_sstables await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_VERSION_DELTAS), read_hummock_version_deltas await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_BRANCHED_OBJECTS), read_hummock_branched_objects await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_COMPACTION_GROUP_CONFIGS), read_hummock_compaction_group_configs await }, + { BuiltinCatalog::Table(&RW_HUMMOCK_META_CONFIGS), read_hummock_meta_configs await}, } #[cfg(test)] diff --git a/src/frontend/src/catalog/system_catalog/pg_catalog/pg_attribute.rs 
b/src/frontend/src/catalog/system_catalog/pg_catalog/pg_attribute.rs index 4ac5ccbe4de03..ff75778182d0e 100644 --- a/src/frontend/src/catalog/system_catalog/pg_catalog/pg_attribute.rs +++ b/src/frontend/src/catalog/system_catalog/pg_catalog/pg_attribute.rs @@ -53,7 +53,8 @@ pub static PG_ATTRIBUTE: LazyLock = LazyLock::new(|| BuiltinView { ''::varchar AS attidentity, \ ''::varchar AS attgenerated, \ -1 AS atttypmod \ - FROM rw_catalog.rw_columns c\ + FROM rw_catalog.rw_columns c \ + WHERE c.is_hidden = false\ " .to_string(), }); diff --git a/src/frontend/src/catalog/system_catalog/pg_catalog/pg_type.rs b/src/frontend/src/catalog/system_catalog/pg_catalog/pg_type.rs index ea9fde4d0afc1..af6a2968e3a8c 100644 --- a/src/frontend/src/catalog/system_catalog/pg_catalog/pg_type.rs +++ b/src/frontend/src/catalog/system_catalog/pg_catalog/pg_type.rs @@ -31,6 +31,8 @@ pub static PG_TYPE: LazyLock = LazyLock::new(|| BuiltinView { (DataType::Int32, "typelem"), // 0 (DataType::Int32, "typarray"), + // FIXME: Should be regproc type + (DataType::Varchar, "typinput"), // false (DataType::Boolean, "typnotnull"), // 0 @@ -58,6 +60,7 @@ pub static PG_TYPE: LazyLock = LazyLock::new(|| BuiltinView { t.name AS typname, \ 0 AS typelem, \ 0 AS typarray, \ + t.input_oid AS typinput, \ false AS typnotnull, \ 0 AS typbasetype, \ -1 AS typtypmod, \ diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/mod.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/mod.rs index 943f1e5d69ae7..9f89c9eed5e81 100644 --- a/src/frontend/src/catalog/system_catalog/rw_catalog/mod.rs +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/mod.rs @@ -19,6 +19,13 @@ mod rw_databases; mod rw_ddl_progress; mod rw_fragments; mod rw_functions; +mod rw_hummock_branched_objects; +mod rw_hummock_compaction_group_configs; +mod rw_hummock_meta_configs; +mod rw_hummock_pinned_snapshots; +mod rw_hummock_pinned_versions; +mod rw_hummock_version; +mod rw_hummock_version_deltas; mod rw_indexes; mod 
rw_materialized_views; mod rw_meta_snapshot; @@ -45,6 +52,13 @@ pub use rw_databases::*; pub use rw_ddl_progress::*; pub use rw_fragments::*; pub use rw_functions::*; +pub use rw_hummock_branched_objects::*; +pub use rw_hummock_compaction_group_configs::*; +pub use rw_hummock_meta_configs::*; +pub use rw_hummock_pinned_snapshots::*; +pub use rw_hummock_pinned_versions::*; +pub use rw_hummock_version::*; +pub use rw_hummock_version_deltas::*; pub use rw_indexes::*; pub use rw_materialized_views::*; pub use rw_meta_snapshot::*; diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_columns.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_columns.rs index a1b867f15eded..dd8cc72ed9bf5 100644 --- a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_columns.rs +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_columns.rs @@ -29,6 +29,9 @@ pub static RW_COLUMNS: LazyLock = LazyLock::new(|| BuiltinTable { (DataType::Int32, "relation_id"), // belonged relation id (DataType::Varchar, "name"), // column name (DataType::Int32, "position"), // 1-indexed position + (DataType::Boolean, "is_hidden"), + (DataType::Boolean, "is_primary_key"), + (DataType::Boolean, "is_distribution_key"), (DataType::Varchar, "data_type"), (DataType::Int32, "type_oid"), (DataType::Int16, "type_len"), @@ -50,6 +53,9 @@ impl SysCatalogReaderImpl { Some(ScalarImpl::Int32(view.id as i32)), Some(ScalarImpl::Utf8(column.name.clone().into())), Some(ScalarImpl::Int32(index as i32 + 1)), + Some(ScalarImpl::Bool(false)), + Some(ScalarImpl::Bool(false)), + Some(ScalarImpl::Bool(false)), Some(ScalarImpl::Utf8(column.data_type().to_string().into())), Some(ScalarImpl::Int32(column.data_type().to_oid())), Some(ScalarImpl::Int16(column.data_type().type_len())), @@ -58,24 +64,49 @@ impl SysCatalogReaderImpl { }) }); + let rows = schema + .iter_system_tables() + .flat_map(|table| { + table + .columns + .iter() + .enumerate() + .map(move |(index, column)| { + OwnedRow::new(vec![ + 
Some(ScalarImpl::Int32(table.id.table_id as i32)), + Some(ScalarImpl::Utf8(column.name().into())), + Some(ScalarImpl::Int32(index as i32 + 1)), + Some(ScalarImpl::Bool(column.is_hidden)), + Some(ScalarImpl::Bool(table.pk.contains(&index))), + Some(ScalarImpl::Bool(false)), + Some(ScalarImpl::Utf8(column.data_type().to_string().into())), + Some(ScalarImpl::Int32(column.data_type().to_oid())), + Some(ScalarImpl::Int16(column.data_type().type_len())), + Some(ScalarImpl::Utf8(column.data_type().pg_name().into())), + ]) + }) + }) + .chain(view_rows); + schema .iter_valid_table() - .map(|table| (table.id.table_id(), table.columns())) - .chain( - schema - .iter_system_tables() - .map(|table| (table.id.table_id(), table.columns())), - ) - .flat_map(|(id, columns)| { - columns + .flat_map(|table| { + table + .columns .iter() .enumerate() - .filter(|(_, column)| !column.is_hidden()) .map(move |(index, column)| { OwnedRow::new(vec![ - Some(ScalarImpl::Int32(id as i32)), + Some(ScalarImpl::Int32(table.id.table_id as i32)), Some(ScalarImpl::Utf8(column.name().into())), Some(ScalarImpl::Int32(index as i32 + 1)), + Some(ScalarImpl::Bool(column.is_hidden)), + Some(ScalarImpl::Bool( + table.pk().iter().any(|idx| idx.column_index == index), + )), + Some(ScalarImpl::Bool( + table.distribution_key().contains(&index), + )), Some(ScalarImpl::Utf8(column.data_type().to_string().into())), Some(ScalarImpl::Int32(column.data_type().to_oid())), Some(ScalarImpl::Int16(column.data_type().type_len())), @@ -83,7 +114,7 @@ impl SysCatalogReaderImpl { ]) }) }) - .chain(view_rows) + .chain(rows) }) .collect_vec()) } diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_fragments.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_fragments.rs index c114a076a4dbb..a4eda730d8941 100644 --- a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_fragments.rs +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_fragments.rs @@ -35,6 +35,7 @@ pub static RW_FRAGMENTS_COLUMNS: 
LazyLock>> = La "upstream_fragment_ids", ), (DataType::List(Box::new(DataType::Varchar)), "flags"), + (DataType::Int32, "parallelism"), ] }); @@ -51,9 +52,9 @@ impl SysCatalogReaderImpl { for i in 0..32 { let bit = 1 << i; if mask & bit != 0 { - match FragmentTypeFlag::from_i32(bit as i32) { - None => continue, - Some(flag) => result.push(flag), + match FragmentTypeFlag::try_from(bit as i32) { + Err(_) => continue, + Ok(flag) => result.push(flag), }; } } @@ -93,6 +94,7 @@ impl SysCatalogReaderImpl { .map(|t| Some(ScalarImpl::Utf8(t.into()))) .collect_vec(), ))), + Some(ScalarImpl::Int32(distribution.parallelism as i32)), ]) }) .collect_vec()) diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_branched_objects.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_branched_objects.rs new file mode 100644 index 0000000000000..5e9ad57107690 --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_branched_objects.rs @@ -0,0 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_BRANCHED_OBJECTS: BuiltinTable = BuiltinTable { + name: "rw_hummock_branched_objects", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "object_id"), + (DataType::Int64, "sst_id"), + (DataType::Int64, "compaction_group_id"), + ], + pk: &[], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_branched_objects(&self) -> Result> { + let branched_objects = self.meta_client.list_branched_objects().await?; + let rows = branched_objects + .into_iter() + .map(|o| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int64(o.object_id as _)), + Some(ScalarImpl::Int64(o.sst_id as _)), + Some(ScalarImpl::Int64(o.compaction_group_id as _)), + ]) + }) + .collect(); + Ok(rows) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_compaction_group_configs.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_compaction_group_configs.rs new file mode 100644 index 0000000000000..758d639388f7e --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_compaction_group_configs.rs @@ -0,0 +1,72 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use itertools::Itertools; +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; +use serde_json::json; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_COMPACTION_GROUP_CONFIGS: BuiltinTable = BuiltinTable { + name: "rw_hummock_compaction_group_configs", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "id"), + (DataType::Int64, "parent_id"), + (DataType::Jsonb, "member_tables"), + (DataType::Jsonb, "compaction_config"), + (DataType::Jsonb, "active_write_limit"), + ], + pk: &[0], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_compaction_group_configs(&self) -> Result> { + let info = self + .meta_client + .list_hummock_compaction_group_configs() + .await?; + let mut write_limits = self.meta_client.list_hummock_active_write_limits().await?; + let mut rows = info + .into_iter() + .map(|i| { + let active_write_limit = write_limits + .remove(&i.id) + .map(|w| ScalarImpl::Jsonb(json!(w).into())); + OwnedRow::new(vec![ + Some(ScalarImpl::Int64(i.id as _)), + Some(ScalarImpl::Int64(i.parent_id as _)), + Some(ScalarImpl::Jsonb(json!(i.member_table_ids).into())), + Some(ScalarImpl::Jsonb(json!(i.compaction_config).into())), + active_write_limit, + ]) + }) + .collect_vec(); + // As compaction group configs and active write limits are fetched via two RPCs, it's possible there's inconsistency. + // Just leave unknown field blank. 
+ rows.extend(write_limits.into_iter().map(|(cg, w)| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int64(cg as _)), + None, + None, + None, + Some(ScalarImpl::Jsonb(json!(w).into())), + ]) + })); + Ok(rows) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_meta_configs.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_meta_configs.rs new file mode 100644 index 0000000000000..e28dc0a926c22 --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_meta_configs.rs @@ -0,0 +1,50 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use itertools::Itertools; +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_META_CONFIGS: BuiltinTable = BuiltinTable { + name: "rw_hummock_meta_configs", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Varchar, "config_name"), + (DataType::Varchar, "config_value"), + ], + pk: &[0], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_meta_configs(&self) -> Result> { + let configs = self + .meta_client + .list_hummock_meta_configs() + .await? 
+ .into_iter() + .sorted() + .map(|(k, v)| { + OwnedRow::new(vec![ + Some(ScalarImpl::Utf8(k.into())), + Some(ScalarImpl::Utf8(v.into())), + ]) + }) + .collect_vec(); + Ok(configs) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_snapshots.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_snapshots.rs new file mode 100644 index 0000000000000..8628e2562e698 --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_snapshots.rs @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use itertools::Itertools; +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_PINNED_SNAPSHOTS: BuiltinTable = BuiltinTable { + name: "rw_hummock_pinned_snapshots", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int32, "worker_node_id"), + (DataType::Int64, "min_pinned_snapshot_id"), + ], + pk: &[], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_pinned_snapshots(&self) -> Result> { + let pinned_snapshots = self + .meta_client + .list_hummock_pinned_snapshots() + .await? 
+ .into_iter() + .map(|s| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int32(s.0 as i32)), + Some(ScalarImpl::Int64(s.1 as i64)), + ]) + }) + .collect_vec(); + Ok(pinned_snapshots) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_versions.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_versions.rs new file mode 100644 index 0000000000000..87b9804d8a26a --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_pinned_versions.rs @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use itertools::Itertools; +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_PINNED_VERSIONS: BuiltinTable = BuiltinTable { + name: "rw_hummock_pinned_versions", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int32, "worker_node_id"), + (DataType::Int64, "min_pinned_version_id"), + ], + pk: &[], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_pinned_versions(&self) -> Result> { + let pinned_versions = self + .meta_client + .list_hummock_pinned_versions() + .await? 
+ .into_iter() + .map(|s| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int32(s.0 as i32)), + Some(ScalarImpl::Int64(s.1 as i64)), + ]) + }) + .collect_vec(); + Ok(pinned_versions) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version.rs new file mode 100644 index 0000000000000..97269341d59f3 --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version.rs @@ -0,0 +1,163 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; +use risingwave_pb::hummock::HummockVersion; +use serde_json::json; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_CURRENT_VERSION: BuiltinTable = BuiltinTable { + name: "rw_hummock_current_version", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "version_id"), + (DataType::Int64, "max_committed_epoch"), + (DataType::Int64, "safe_epoch"), + (DataType::Jsonb, "compaction_group"), + ], + pk: &[], +}; + +pub const RW_HUMMOCK_CHECKPOINT_VERSION: BuiltinTable = BuiltinTable { + name: "rw_hummock_checkpoint_version", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "version_id"), + (DataType::Int64, "max_committed_epoch"), + (DataType::Int64, "safe_epoch"), + (DataType::Jsonb, "compaction_group"), + ], + pk: &[], +}; + +pub const RW_HUMMOCK_SSTABLES: BuiltinTable = BuiltinTable { + name: "rw_hummock_sstables", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "sstable_id"), + (DataType::Int64, "object_id"), + (DataType::Int64, "compaction_group_id"), + (DataType::Int32, "level_id"), + (DataType::Int64, "sub_level_id"), + (DataType::Int32, "level_type"), + (DataType::Bytea, "key_range_left"), + (DataType::Bytea, "key_range_right"), + (DataType::Boolean, "right_exclusive"), + (DataType::Int64, "file_size"), + (DataType::Int64, "meta_offset"), + (DataType::Int64, "stale_key_count"), + (DataType::Int64, "total_key_count"), + (DataType::Int64, "min_epoch"), + (DataType::Int64, "max_epoch"), + (DataType::Int64, "uncompressed_file_size"), + (DataType::Int64, "range_tombstone_count"), + (DataType::Int32, "bloom_filter_kind"), + (DataType::Jsonb, "table_ids"), + ], + pk: &[0], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_current_version(&self) -> Result> { + let 
version = self.meta_client.get_hummock_current_version().await?; + Ok(version_to_compaction_group_rows( + &remove_key_range_from_version(version), + )) + } + + pub async fn read_hummock_checkpoint_version(&self) -> Result> { + let version = self.meta_client.get_hummock_checkpoint_version().await?; + Ok(version_to_compaction_group_rows( + &remove_key_range_from_version(version), + )) + } + + pub async fn read_hummock_sstables(&self) -> Result> { + let version = self.meta_client.get_hummock_current_version().await?; + Ok(version_to_sstable_rows(version)) + } +} + +fn remove_key_range_from_version(mut version: HummockVersion) -> HummockVersion { + // Because key range is too verbose for manual analysis, just don't expose it. + for cg in version.levels.values_mut() { + for level in cg + .levels + .iter_mut() + .chain(cg.l0.as_mut().unwrap().sub_levels.iter_mut()) + { + for sst in &mut level.table_infos { + sst.key_range.take(); + } + } + } + version +} + +fn version_to_compaction_group_rows(version: &HummockVersion) -> Vec { + version + .levels + .values() + .map(|cg| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int64(version.id as _)), + Some(ScalarImpl::Int64(version.max_committed_epoch as _)), + Some(ScalarImpl::Int64(version.safe_epoch as _)), + Some(ScalarImpl::Jsonb(json!(cg).into())), + ]) + }) + .collect() +} + +fn version_to_sstable_rows(version: HummockVersion) -> Vec { + let mut sstables = vec![]; + for cg in version.levels.into_values() { + for level in cg.levels.into_iter().chain(cg.l0.unwrap().sub_levels) { + for sst in level.table_infos { + let key_range = sst.key_range.unwrap(); + let sub_level_id = if level.level_idx > 0 { + None + } else { + Some(ScalarImpl::Int64(level.sub_level_id as _)) + }; + sstables.push(OwnedRow::new(vec![ + Some(ScalarImpl::Int64(sst.sst_id as _)), + Some(ScalarImpl::Int64(sst.object_id as _)), + Some(ScalarImpl::Int64(cg.group_id as _)), + Some(ScalarImpl::Int32(level.level_idx as _)), + sub_level_id, + 
Some(ScalarImpl::Int32(level.level_type as _)), + Some(ScalarImpl::Bytea(key_range.left.into())), + Some(ScalarImpl::Bytea(key_range.right.into())), + Some(ScalarImpl::Bool(key_range.right_exclusive)), + Some(ScalarImpl::Int64(sst.file_size as _)), + Some(ScalarImpl::Int64(sst.meta_offset as _)), + Some(ScalarImpl::Int64(sst.stale_key_count as _)), + Some(ScalarImpl::Int64(sst.total_key_count as _)), + Some(ScalarImpl::Int64(sst.min_epoch as _)), + Some(ScalarImpl::Int64(sst.max_epoch as _)), + Some(ScalarImpl::Int64(sst.uncompressed_file_size as _)), + Some(ScalarImpl::Int64(sst.range_tombstone_count as _)), + Some(ScalarImpl::Int32(sst.bloom_filter_kind as _)), + Some(ScalarImpl::Jsonb(json!(sst.table_ids).into())), + ])); + } + } + } + sstables +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version_deltas.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version_deltas.rs new file mode 100644 index 0000000000000..059fa5d7d47da --- /dev/null +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_hummock_version_deltas.rs @@ -0,0 +1,57 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_common::catalog::RW_CATALOG_SCHEMA_NAME; +use risingwave_common::error::Result; +use risingwave_common::row::OwnedRow; +use risingwave_common::types::{DataType, ScalarImpl}; +use serde_json::json; + +use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; + +pub const RW_HUMMOCK_VERSION_DELTAS: BuiltinTable = BuiltinTable { + name: "rw_hummock_version_deltas", + schema: RW_CATALOG_SCHEMA_NAME, + columns: &[ + (DataType::Int64, "id"), + (DataType::Int64, "prev_id"), + (DataType::Int64, "max_committed_epoch"), + (DataType::Int64, "safe_epoch"), + (DataType::Boolean, "trivial_move"), + (DataType::Jsonb, "gc_object_ids"), + (DataType::Jsonb, "group_deltas"), + ], + pk: &[0], +}; + +impl SysCatalogReaderImpl { + pub async fn read_hummock_version_deltas(&self) -> Result> { + let deltas = self.meta_client.list_version_deltas().await?; + let rows = deltas + .into_iter() + .map(|d| { + OwnedRow::new(vec![ + Some(ScalarImpl::Int64(d.id as _)), + Some(ScalarImpl::Int64(d.prev_id as _)), + Some(ScalarImpl::Int64(d.max_committed_epoch as _)), + Some(ScalarImpl::Int64(d.safe_epoch as _)), + Some(ScalarImpl::Bool(d.trivial_move)), + Some(ScalarImpl::Jsonb(json!(d.gc_object_ids).into())), + Some(ScalarImpl::Jsonb(json!(d.group_deltas).into())), + ]) + }) + .collect(); + Ok(rows) + } +} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_types.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_types.rs index ae79660e69607..02462c4cee303 100644 --- a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_types.rs +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_types.rs @@ -24,25 +24,29 @@ use risingwave_common::types::{DataType, ScalarImpl}; use crate::catalog::system_catalog::{BuiltinTable, SysCatalogReaderImpl}; macro_rules! 
impl_pg_type_data { - ($( { $enum:ident | $oid:literal | $oid_array:literal | $name:ident | $len:literal } )*) => { + ($( { $enum:ident | $oid:literal | $oid_array:literal | $name:ident | $input:ident | $len:literal } )*) => { &[ $( - ($oid, stringify!($name)), + ($oid, stringify!($name), stringify!($input)), )* // Note: rw doesn't support `text` type, returning it is just a workaround to be compatible // with PostgreSQL. - (25, "text"), - (1301, "rw_int256"), + (25, "text", "textin"), + (1301, "rw_int256", "rw_int256_in"), ] } } -pub const RW_TYPE_DATA: &[(i32, &str)] = for_all_base_types! { impl_pg_type_data }; +pub const RW_TYPE_DATA: &[(i32, &str, &str)] = for_all_base_types! { impl_pg_type_data }; /// `rw_types` stores all supported types in the database. pub static RW_TYPES: LazyLock = LazyLock::new(|| BuiltinTable { name: "rw_types", schema: RW_CATALOG_SCHEMA_NAME, - columns: &[(DataType::Int32, "id"), (DataType::Varchar, "name")], + columns: &[ + (DataType::Int32, "id"), + (DataType::Varchar, "name"), + (DataType::Varchar, "input_oid"), + ], pk: &[0], }); @@ -50,10 +54,11 @@ impl SysCatalogReaderImpl { pub fn read_rw_types(&self) -> Result> { Ok(RW_TYPE_DATA .iter() - .map(|(id, name)| { + .map(|(id, name, input)| { OwnedRow::new(vec![ Some(ScalarImpl::Int32(*id)), Some(ScalarImpl::Utf8(name.to_string().into())), + Some(ScalarImpl::Utf8(input.to_string().into())), ]) }) .collect_vec()) diff --git a/src/frontend/src/catalog/table_catalog.rs b/src/frontend/src/catalog/table_catalog.rs index 6c83df13e80be..b0f9088132f59 100644 --- a/src/frontend/src/catalog/table_catalog.rs +++ b/src/frontend/src/catalog/table_catalog.rs @@ -24,7 +24,7 @@ use risingwave_common::error::{ErrorCode, RwError}; use risingwave_common::util::epoch::Epoch; use risingwave_common::util::sort_util::ColumnOrder; use risingwave_pb::catalog::table::{OptionalAssociatedSourceId, PbTableType, PbTableVersion}; -use risingwave_pb::catalog::PbTable; +use risingwave_pb::catalog::{PbCreateType, 
PbStreamJobStatus, PbTable}; use risingwave_pb::plan_common::column_desc::GeneratedOrDefaultColumn; use risingwave_pb::plan_common::DefaultColumnDesc; @@ -149,6 +149,41 @@ pub struct TableCatalog { /// Indicate whether to use watermark cache for state table. pub cleaned_by_watermark: bool, + + /// Indicate whether to create table in background or foreground. + pub create_type: CreateType, +} + +// How the stream job was created will determine +// whether they are persisted. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum CreateType { + Background, + Foreground, +} + +#[cfg(test)] +impl Default for CreateType { + fn default() -> Self { + Self::Foreground + } +} + +impl CreateType { + fn from_prost(prost: PbCreateType) -> Self { + match prost { + PbCreateType::Background => Self::Background, + PbCreateType::Foreground => Self::Foreground, + PbCreateType::Unspecified => unreachable!(), + } + } + + pub(crate) fn to_prost(self) -> PbCreateType { + match self { + Self::Background => PbCreateType::Background, + Self::Foreground => PbCreateType::Foreground, + } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -401,6 +436,8 @@ impl TableCatalog { initialized_at_epoch: self.initialized_at_epoch.map(|epoch| epoch.0), created_at_epoch: self.created_at_epoch.map(|epoch| epoch.0), cleaned_by_watermark: self.cleaned_by_watermark, + stream_job_status: PbStreamJobStatus::Creating.into(), + create_type: self.create_type.to_prost().into(), } } @@ -427,23 +464,20 @@ impl TableCatalog { } pub fn default_columns(&self) -> impl Iterator + '_ { - self.columns - .iter() - .enumerate() - .filter(|(_, c)| c.is_default()) - .map(|(i, c)| { - if let GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { expr }) = - c.column_desc.generated_or_default_column.clone().unwrap() - { - ( - i, - ExprImpl::from_expr_proto(&expr.unwrap()) - .expect("expr in default columns corrupted"), - ) - } else { - unreachable!() - } - }) + 
self.columns.iter().enumerate().filter_map(|(i, c)| { + if let Some(GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { + expr, .. + })) = c.column_desc.generated_or_default_column.as_ref() + { + Some(( + i, + ExprImpl::from_expr_proto(expr.as_ref().unwrap()) + .expect("expr in default columns corrupted"), + )) + } else { + None + } + }) } pub fn has_generated_column(&self) -> bool { @@ -456,6 +490,7 @@ impl From for TableCatalog { let id = tb.id; let tb_conflict_behavior = tb.handle_pk_conflict_behavior(); let table_type = tb.get_table_type().unwrap(); + let create_type = tb.get_create_type().unwrap_or(PbCreateType::Foreground); let associated_source_id = tb.optional_associated_source_id.map(|id| match id { OptionalAssociatedSourceId::AssociatedSourceId(id) => id, }); @@ -515,6 +550,7 @@ impl From for TableCatalog { created_at_epoch: tb.created_at_epoch.map(Epoch::from), initialized_at_epoch: tb.initialized_at_epoch.map(Epoch::from), cleaned_by_watermark: matches!(tb.cleaned_by_watermark, true), + create_type: CreateType::from_prost(create_type), } } } @@ -542,7 +578,7 @@ mod tests { use risingwave_common::test_prelude::*; use risingwave_common::types::*; use risingwave_common::util::sort_util::OrderType; - use risingwave_pb::catalog::PbTable; + use risingwave_pb::catalog::{PbStreamJobStatus, PbTable}; use risingwave_pb::plan_common::{PbColumnCatalog, PbColumnDesc}; use super::*; @@ -605,6 +641,8 @@ mod tests { cardinality: None, created_at_epoch: None, cleaned_by_watermark: false, + stream_job_status: PbStreamJobStatus::Creating.into(), + create_type: PbCreateType::Foreground.into(), } .into(); @@ -659,6 +697,7 @@ mod tests { created_at_epoch: None, initialized_at_epoch: None, cleaned_by_watermark: false, + create_type: CreateType::Foreground, } ); assert_eq!(table, TableCatalog::from(table.to_prost(0, 0))); diff --git a/src/frontend/src/expr/agg_call.rs b/src/frontend/src/expr/agg_call.rs index c8d552508b30f..c9fe56b841290 100644 --- 
a/src/frontend/src/expr/agg_call.rs +++ b/src/frontend/src/expr/agg_call.rs @@ -15,8 +15,8 @@ use itertools::Itertools; use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::types::DataType; -use risingwave_expr::agg::AggKind; -use risingwave_expr::sig::agg::AGG_FUNC_SIG_MAP; +use risingwave_expr::aggregate::AggKind; +use risingwave_expr::sig::FUNCTION_REGISTRY; use super::{Expr, ExprImpl, Literal, OrderBy}; use crate::utils::Condition; @@ -70,11 +70,6 @@ impl AggCall { // min/max allowed for all types except for bool and jsonb (#7981) (AggKind::Min | AggKind::Max, [DataType::Jsonb]) => return Err(err()), - // may return list or struct type - (AggKind::Min | AggKind::Max | AggKind::FirstValue | AggKind::LastValue, [input]) => { - input.clone() - } - (AggKind::ArrayAgg, [input]) => List(Box::new(input.clone())), // functions that are rewritten in the frontend and don't exist in the expr crate (AggKind::Avg, [input]) => match input { Int16 | Int32 | Int64 | Decimal => Decimal, @@ -90,21 +85,9 @@ impl AggCall { Float32 | Float64 | Int256 => Float64, _ => return Err(err()), }, - // Ordered-Set Aggregation - (AggKind::PercentileCont, [input]) => match input { - Float64 => Float64, - _ => return Err(err()), - }, - (AggKind::PercentileDisc | AggKind::Mode, [input]) => input.clone(), (AggKind::Grouping, _) => Int32, // other functions are handled by signature map - _ => { - let args = args.iter().map(|t| t.into()).collect::>(); - return match AGG_FUNC_SIG_MAP.get_return_type(agg_kind, &args) { - Some(t) => Ok(t.into()), - None => Err(err()), - }; - } + _ => FUNCTION_REGISTRY.get_return_type(agg_kind, args)?, }) } diff --git a/src/frontend/src/expr/expr_visitor.rs b/src/frontend/src/expr/expr_visitor.rs index 4fdfbb07f7518..5bc827b43aba8 100644 --- a/src/frontend/src/expr/expr_visitor.rs +++ b/src/frontend/src/expr/expr_visitor.rs @@ -25,13 +25,15 @@ use super::{ /// /// Note: The default implementation for `visit_subquery` is a no-op, i.e., 
expressions inside /// subqueries are not traversed. -pub trait ExprVisitor { +pub trait ExprVisitor { + type Result: Default; + /// This merge function is used to reduce results of expr inputs. /// In order to always remind users to implement themselves, we don't provide an default /// implementation. - fn merge(a: R, b: R) -> R; + fn merge(a: Self::Result, b: Self::Result) -> Self::Result; - fn visit_expr(&mut self, expr: &ExprImpl) -> R { + fn visit_expr(&mut self, expr: &ExprImpl) -> Self::Result { match expr { ExprImpl::InputRef(inner) => self.visit_input_ref(inner), ExprImpl::Literal(inner) => self.visit_literal(inner), @@ -47,7 +49,7 @@ pub trait ExprVisitor { ExprImpl::Now(inner) => self.visit_now(inner), } } - fn visit_function_call(&mut self, func_call: &FunctionCall) -> R { + fn visit_function_call(&mut self, func_call: &FunctionCall) -> Self::Result { func_call .inputs() .iter() @@ -55,10 +57,13 @@ pub trait ExprVisitor { .reduce(Self::merge) .unwrap_or_default() } - fn visit_function_call_with_lambda(&mut self, func_call: &FunctionCallWithLambda) -> R { + fn visit_function_call_with_lambda( + &mut self, + func_call: &FunctionCallWithLambda, + ) -> Self::Result { self.visit_function_call(func_call.base()) } - fn visit_agg_call(&mut self, agg_call: &AggCall) -> R { + fn visit_agg_call(&mut self, agg_call: &AggCall) -> Self::Result { let mut r = agg_call .args() .iter() @@ -69,22 +74,22 @@ pub trait ExprVisitor { r = Self::merge(r, agg_call.filter().visit_expr(self)); r } - fn visit_parameter(&mut self, _: &Parameter) -> R { - R::default() + fn visit_parameter(&mut self, _: &Parameter) -> Self::Result { + Self::Result::default() } - fn visit_literal(&mut self, _: &Literal) -> R { - R::default() + fn visit_literal(&mut self, _: &Literal) -> Self::Result { + Self::Result::default() } - fn visit_input_ref(&mut self, _: &InputRef) -> R { - R::default() + fn visit_input_ref(&mut self, _: &InputRef) -> Self::Result { + Self::Result::default() } - fn 
visit_subquery(&mut self, _: &Subquery) -> R { - R::default() + fn visit_subquery(&mut self, _: &Subquery) -> Self::Result { + Self::Result::default() } - fn visit_correlated_input_ref(&mut self, _: &CorrelatedInputRef) -> R { - R::default() + fn visit_correlated_input_ref(&mut self, _: &CorrelatedInputRef) -> Self::Result { + Self::Result::default() } - fn visit_table_function(&mut self, func_call: &TableFunction) -> R { + fn visit_table_function(&mut self, func_call: &TableFunction) -> Self::Result { func_call .args .iter() @@ -92,7 +97,7 @@ pub trait ExprVisitor { .reduce(Self::merge) .unwrap_or_default() } - fn visit_window_function(&mut self, func_call: &WindowFunction) -> R { + fn visit_window_function(&mut self, func_call: &WindowFunction) -> Self::Result { func_call .args .iter() @@ -100,7 +105,7 @@ pub trait ExprVisitor { .reduce(Self::merge) .unwrap_or_default() } - fn visit_user_defined_function(&mut self, func_call: &UserDefinedFunction) -> R { + fn visit_user_defined_function(&mut self, func_call: &UserDefinedFunction) -> Self::Result { func_call .args .iter() @@ -108,7 +113,7 @@ pub trait ExprVisitor { .reduce(Self::merge) .unwrap_or_default() } - fn visit_now(&mut self, _: &Now) -> R { - R::default() + fn visit_now(&mut self, _: &Now) -> Self::Result { + Self::Result::default() } } diff --git a/src/frontend/src/expr/function_call.rs b/src/frontend/src/expr/function_call.rs index e826d43d9342a..f5e618892fc5e 100644 --- a/src/frontend/src/expr/function_call.rs +++ b/src/frontend/src/expr/function_call.rs @@ -15,9 +15,8 @@ use itertools::Itertools; use risingwave_common::catalog::Schema; use risingwave_common::error::{ErrorCode, Result as RwResult, RwError}; -use risingwave_common::types::DataType; +use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::vector_op::cast::literal_parsing; use thiserror::Error; use super::{cast_ok, infer_some_all, infer_type, CastContext, Expr, 
ExprImpl, Literal}; @@ -129,10 +128,7 @@ impl FunctionCall { let datum = literal .get_data() .as_ref() - .map(|scalar| { - let s = scalar.as_utf8(); - literal_parsing(&target, s) - }) + .map(|scalar| ScalarImpl::from_literal(scalar.as_utf8(), &target)) .transpose(); if let Ok(datum) = datum { *child = Literal::new(datum, target).into(); @@ -215,15 +211,6 @@ impl FunctionCall { match expr_type { ExprType::Some | ExprType::All => { let return_type = infer_some_all(func_types, &mut inputs)?; - - if return_type != DataType::Boolean { - return Err(ErrorCode::BindError(format!( - "op SOME/ANY/ALL (array) requires operator to yield boolean, but got {:?}", - return_type - )) - .into()); - } - Ok(FunctionCall::new_unchecked(expr_type, inputs, return_type).into()) } ExprType::Not | ExprType::IsNotNull | ExprType::IsNull => Ok(FunctionCall::new( diff --git a/src/frontend/src/expr/function_impl/cast_regclass.rs b/src/frontend/src/expr/function_impl/cast_regclass.rs new file mode 100644 index 0000000000000..e0f8670d791fb --- /dev/null +++ b/src/frontend/src/expr/function_impl/cast_regclass.rs @@ -0,0 +1,102 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_common::session_config::SearchPath; +use risingwave_expr::{capture_context, function, ExprError}; +use risingwave_sqlparser::parser::{Parser, ParserError}; +use risingwave_sqlparser::tokenizer::{Token, Tokenizer}; +use thiserror::Error; + +use super::context::{AUTH_CONTEXT, CATALOG_READER, DB_NAME, SEARCH_PATH}; +use crate::catalog::root_catalog::SchemaPath; +use crate::catalog::{CatalogError, CatalogReader}; +use crate::session::AuthContext; + +#[derive(Error, Debug)] +enum ResolveRegclassError { + #[error("parse object name failed: {0}")] + Parser(#[from] ParserError), + #[error("catalog error: {0}")] + Catalog(#[from] CatalogError), +} + +impl From for ExprError { + fn from(e: ResolveRegclassError) -> Self { + match e { + ResolveRegclassError::Parser(e) => ExprError::Parse(e.to_string().into_boxed_str()), + ResolveRegclassError::Catalog(e) => ExprError::InvalidParam { + name: "name", + reason: e.to_string().into_boxed_str(), + }, + } + } +} + +#[capture_context(CATALOG_READER, AUTH_CONTEXT, SEARCH_PATH, DB_NAME)] +fn resolve_regclass_impl( + catalog: &CatalogReader, + auth_context: &AuthContext, + search_path: &SearchPath, + db_name: &str, + class_name: &str, +) -> Result { + resolve_regclass_inner(catalog, auth_context, search_path, db_name, class_name) + .map_err(Into::into) +} + +fn resolve_regclass_inner( + catalog: &CatalogReader, + auth_context: &AuthContext, + search_path: &SearchPath, + db_name: &str, + class_name: &str, +) -> Result { + let obj = parse_object_name(class_name)?; + + if obj.0.len() == 1 { + let class_name = obj.0[0].real_value(); + let schema_path = SchemaPath::Path(search_path, &auth_context.user_name); + Ok(catalog + .read_guard() + .get_id_by_class_name(db_name, schema_path, &class_name)?) + } else { + let schema = obj.0[0].real_value(); + let class_name = obj.0[1].real_value(); + let schema_path = SchemaPath::Name(&schema); + Ok(catalog + .read_guard() + .get_id_by_class_name(db_name, schema_path, &class_name)?) 
+ } +} + +fn parse_object_name(name: &str) -> Result { + // We use the full parser here because this function needs to accept every legal way + // of identifying an object in PG SQL as a valid value for the varchar + // literal. For example: 'foo', 'public.foo', '"my table"', and + // '"my schema".foo' must all work as values passed pg_table_size. + let mut tokenizer = Tokenizer::new(name); + let tokens = tokenizer + .tokenize_with_location() + .map_err(ParserError::from)?; + let mut parser = Parser::new(tokens); + let object = parser.parse_object_name()?; + parser.expect_token(&Token::EOF)?; + Ok(object) +} + +#[function("cast_regclass(varchar) -> int4")] +fn cast_regclass(class_name: &str) -> Result { + let oid = resolve_regclass_impl_captured(class_name)?; + Ok(oid as i32) +} diff --git a/src/frontend/src/expr/function_impl/col_description.rs b/src/frontend/src/expr/function_impl/col_description.rs new file mode 100644 index 0000000000000..4e0086d5afc78 --- /dev/null +++ b/src/frontend/src/expr/function_impl/col_description.rs @@ -0,0 +1,25 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::Write; + +use risingwave_expr::{function, ExprError}; + +#[function("col_description(varchar, int4) -> varchar")] +fn col_description(_name: &str, _col: i32, writer: &mut impl Write) -> Result<(), ExprError> { + // TODO: Currently we don't support `COMMENT` statement, so we just return empty string. 
+ writer.write_str("").unwrap(); + + Ok(()) +} diff --git a/src/frontend/src/expr/function_impl/context.rs b/src/frontend/src/expr/function_impl/context.rs new file mode 100644 index 0000000000000..13a7175fabb54 --- /dev/null +++ b/src/frontend/src/expr/function_impl/context.rs @@ -0,0 +1,27 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use risingwave_common::session_config::SearchPath; +use risingwave_expr::define_context; + +use crate::session::AuthContext; + +define_context! { + pub(super) CATALOG_READER: crate::catalog::CatalogReader, + pub(super) AUTH_CONTEXT: Arc, + pub(super) DB_NAME: String, + pub(super) SEARCH_PATH: SearchPath, +} diff --git a/src/expr/src/function/mod.rs b/src/frontend/src/expr/function_impl/mod.rs similarity index 90% rename from src/expr/src/function/mod.rs rename to src/frontend/src/expr/function_impl/mod.rs index ed628e99398ca..1f31b7f307dac 100644 --- a/src/expr/src/function/mod.rs +++ b/src/frontend/src/expr/function_impl/mod.rs @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-pub mod window; - -// TODO(rc): this module is to be removed +mod cast_regclass; +mod col_description; +pub mod context; diff --git a/src/frontend/src/expr/literal.rs b/src/frontend/src/expr/literal.rs index bf4c95b2114d4..2882243f93170 100644 --- a/src/frontend/src/expr/literal.rs +++ b/src/frontend/src/expr/literal.rs @@ -14,8 +14,7 @@ use risingwave_common::array::list_array::display_for_explain; use risingwave_common::types::{literal_type_match, DataType, Datum, ToText}; -use risingwave_common::util::value_encoding::{deserialize_datum, serialize_datum}; -use risingwave_pb::data::PbDatum; +use risingwave_common::util::value_encoding::{DatumFromProtoExt, DatumToProtoExt}; use risingwave_pb::expr::expr_node::RexNode; use super::Expr; @@ -121,8 +120,7 @@ impl Expr for Literal { /// Convert a literal value (datum) into protobuf. pub fn literal_to_value_encoding(d: &Datum) -> RexNode { - let body = serialize_datum(d.as_ref()); - RexNode::Constant(PbDatum { body }) + RexNode::Constant(d.to_protobuf()) } /// Convert protobuf into a literal value (datum). 
@@ -132,7 +130,7 @@ fn value_encoding_to_literal( ) -> risingwave_common::error::Result { if let Some(rex_node) = proto { if let RexNode::Constant(prost_datum) = rex_node { - let datum = deserialize_datum(prost_datum.body.as_ref(), ty)?; + let datum = Datum::from_protobuf(prost_datum, ty)?; Ok(datum) } else { unreachable!() @@ -145,8 +143,8 @@ fn value_encoding_to_literal( #[cfg(test)] mod tests { use risingwave_common::array::{ListValue, StructValue}; - use risingwave_common::types::{DataType, ScalarImpl}; - use risingwave_common::util::value_encoding::deserialize_datum; + use risingwave_common::types::{DataType, Datum, ScalarImpl}; + use risingwave_common::util::value_encoding::DatumFromProtoExt; use risingwave_pb::expr::expr_node::RexNode; use crate::expr::literal::literal_to_value_encoding; @@ -161,8 +159,8 @@ mod tests { let data = Some(ScalarImpl::Struct(value.clone())); let node = literal_to_value_encoding(&data); if let RexNode::Constant(prost) = node { - let data2 = deserialize_datum( - prost.get_body().as_slice(), + let data2 = Datum::from_protobuf( + &prost, &DataType::new_struct( vec![DataType::Varchar, DataType::Int32, DataType::Int32], vec![], @@ -184,12 +182,9 @@ mod tests { let data = Some(ScalarImpl::List(value.clone())); let node = literal_to_value_encoding(&data); if let RexNode::Constant(prost) = node { - let data2 = deserialize_datum( - prost.get_body().as_slice(), - &DataType::List(Box::new(DataType::Varchar)), - ) - .unwrap() - .unwrap(); + let data2 = Datum::from_protobuf(&prost, &DataType::List(Box::new(DataType::Varchar))) + .unwrap() + .unwrap(); assert_eq!(ScalarImpl::List(value), data2); } } diff --git a/src/frontend/src/expr/mod.rs b/src/frontend/src/expr/mod.rs index d999fcbe4c1e8..6eec2983f5c91 100644 --- a/src/frontend/src/expr/mod.rs +++ b/src/frontend/src/expr/mod.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::iter::once; - use enum_as_inner::EnumAsInner; use fixedbitset::FixedBitSet; use futures::FutureExt; @@ -21,7 +19,7 @@ use paste::paste; use risingwave_common::array::ListValue; use risingwave_common::error::{ErrorCode, Result as RwResult}; use risingwave_common::types::{DataType, Datum, Scalar}; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; use risingwave_expr::expr::build_from_prost; use risingwave_pb::expr::expr_node::RexNode; use risingwave_pb::expr::{ExprNode, ProjectSetSelectItem}; @@ -46,6 +44,7 @@ pub use order_by_expr::{OrderBy, OrderByExpr}; mod expr_mutator; mod expr_rewriter; mod expr_visitor; +pub mod function_impl; mod session_timezone; mod type_inference; mod utils; @@ -67,8 +66,8 @@ pub use session_timezone::SessionTimezone; pub use subquery::{Subquery, SubqueryKind}; pub use table_function::{TableFunction, TableFunctionType}; pub use type_inference::{ - agg_func_sigs, align_types, cast_map_array, cast_ok, cast_sigs, func_sigs, infer_some_all, - infer_type, least_restrictive, AggFuncSig, CastContext, CastSig, FuncSign, + align_types, cast_map_array, cast_ok, cast_sigs, infer_some_all, infer_type, least_restrictive, + CastContext, CastSig, FuncSign, }; pub use user_defined_function::UserDefinedFunction; pub use utils::*; @@ -201,7 +200,7 @@ impl ExprImpl { /// # Panics /// Panics if `input_ref >= input_col_num`. pub fn collect_input_refs(&self, input_col_num: usize) -> FixedBitSet { - collect_input_refs(input_col_num, once(self)) + collect_input_refs(input_col_num, [self]) } /// Check if the expression has no side effects and output is deterministic @@ -253,6 +252,11 @@ impl ExprImpl { FunctionCall::cast_mut(self, target, CastContext::Implicit) } + /// Shorthand to inplace cast expr to `target` type in explicit context. 
+ pub fn cast_explicit_mut(&mut self, target: DataType) -> Result<(), CastError> { + FunctionCall::cast_mut(self, target, CastContext::Explicit) + } + /// Ensure the return type of this expression is an array of some type. pub fn ensure_array_type(&self) -> Result<(), ErrorCode> { if self.is_untyped() { @@ -347,7 +351,9 @@ macro_rules! impl_has_variant { pub fn [](&self) -> bool { struct Has {} - impl ExprVisitor for Has { + impl ExprVisitor for Has { + + type Result = bool; fn merge(a: bool, b: bool) -> bool { a | b @@ -418,7 +424,9 @@ impl ExprImpl { depth: usize, } - impl ExprVisitor for Has { + impl ExprVisitor for Has { + type Result = bool; + fn merge(a: bool, b: bool) -> bool { a | b } @@ -476,7 +484,9 @@ impl ExprImpl { correlated_id: CorrelatedId, } - impl ExprVisitor for Has { + impl ExprVisitor for Has { + type Result = bool; + fn merge(a: bool, b: bool) -> bool { a | b } @@ -596,7 +606,9 @@ impl ExprImpl { struct HasOthers { has_others: bool, } - impl ExprVisitor<()> for HasOthers { + impl ExprVisitor for HasOthers { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_expr(&mut self, expr: &ExprImpl) { diff --git a/src/frontend/src/expr/order_by_expr.rs b/src/frontend/src/expr/order_by_expr.rs index 48790ceb39337..e7fe005256640 100644 --- a/src/frontend/src/expr/order_by_expr.rs +++ b/src/frontend/src/expr/order_by_expr.rs @@ -71,7 +71,7 @@ impl OrderBy { } } - pub fn visit_expr + ?Sized>(&self, visitor: &mut V) -> R { + pub fn visit_expr(&self, visitor: &mut V) -> V::Result { self.sort_exprs .iter() .map(|expr| visitor.visit_expr(&expr.expr)) diff --git a/src/frontend/src/expr/pure.rs b/src/frontend/src/expr/pure.rs index a229fed79b4f0..e5d698c2ce172 100644 --- a/src/frontend/src/expr/pure.rs +++ b/src/frontend/src/expr/pure.rs @@ -18,7 +18,9 @@ use super::{ExprImpl, ExprVisitor}; use crate::expr::FunctionCall; pub(crate) struct ImpureAnalyzer {} -impl ExprVisitor for ImpureAnalyzer { +impl ExprVisitor for ImpureAnalyzer { + type Result = bool; + 
fn merge(a: bool, b: bool) -> bool { // the expr will be impure if any of its input is impure a || b @@ -105,6 +107,7 @@ impl ExprVisitor for ImpureAnalyzer { | expr_node::Type::RegexpMatch | expr_node::Type::RegexpReplace | expr_node::Type::RegexpCount + | expr_node::Type::RegexpSplitToArray | expr_node::Type::Pow | expr_node::Type::Exp | expr_node::Type::Ln @@ -154,6 +157,7 @@ impl ExprVisitor for ImpureAnalyzer { | expr_node::Type::ArrayToString | expr_node::Type::ArrayCat | expr_node::Type::ArrayMax + | expr_node::Type::ArraySum | expr_node::Type::ArraySort | expr_node::Type::ArrayAppend | expr_node::Type::ArrayPrepend @@ -168,6 +172,7 @@ impl ExprVisitor for ImpureAnalyzer { | expr_node::Type::ArrayReplace | expr_node::Type::ArrayPosition | expr_node::Type::HexToInt256 + | expr_node::Type::JsonbCat | expr_node::Type::JsonbAccessInner | expr_node::Type::JsonbAccessStr | expr_node::Type::JsonbTypeof @@ -195,7 +200,9 @@ impl ExprVisitor for ImpureAnalyzer { | expr_node::Type::ArrayPositions | expr_node::Type::StringToArray | expr_node::Type::Format - | expr_node::Type::ArrayTransform => + | expr_node::Type::ArrayTransform + | expr_node::Type::Greatest + | expr_node::Type::Least => // expression output is deterministic(same result for the same input) { let x = func_call @@ -207,7 +214,13 @@ impl ExprVisitor for ImpureAnalyzer { x } // expression output is not deterministic - expr_node::Type::Vnode | expr_node::Type::Proctime => true, + expr_node::Type::Vnode + | expr_node::Type::Proctime + | expr_node::Type::PgSleep + | expr_node::Type::PgSleepFor + | expr_node::Type::PgSleepUntil + | expr_node::Type::ColDescription + | expr_node::Type::CastRegclass => true, } } } diff --git a/src/frontend/src/expr/table_function.rs b/src/frontend/src/expr/table_function.rs index 6b01ca2bc98cb..dfb028d605705 100644 --- a/src/frontend/src/expr/table_function.rs +++ b/src/frontend/src/expr/table_function.rs @@ -15,9 +15,8 @@ use std::sync::Arc; use itertools::Itertools; -use 
risingwave_common::error::ErrorCode; use risingwave_common::types::DataType; -use risingwave_expr::sig::table_function::FUNC_SIG_MAP; +use risingwave_expr::sig::FUNCTION_REGISTRY; pub use risingwave_pb::expr::table_function::PbType as TableFunctionType; use risingwave_pb::expr::{ TableFunction as TableFunctionPb, UserDefinedTableFunction as UserDefinedTableFunctionPb, @@ -44,20 +43,10 @@ impl TableFunction { /// Create a `TableFunction` expr with the return type inferred from `func_type` and types of /// `inputs`. pub fn new(func_type: TableFunctionType, args: Vec) -> RwResult { - let arg_types = args.iter().map(|c| c.return_type()).collect_vec(); - let signature = FUNC_SIG_MAP - .get( - func_type, - &args.iter().map(|c| c.return_type().into()).collect_vec(), - ) - .ok_or_else(|| { - ErrorCode::BindError(format!( - "table function not found: {:?}({})", - func_type, - arg_types.iter().map(|t| format!("{:?}", t)).join(", "), - )) - })?; - let return_type = (signature.type_infer)(&arg_types)?; + let return_type = FUNCTION_REGISTRY.get_return_type( + func_type, + &args.iter().map(|c| c.return_type()).collect_vec(), + )?; Ok(TableFunction { args, return_type, diff --git a/src/frontend/src/expr/type_inference/cast.rs b/src/frontend/src/expr/type_inference/cast.rs index b7e3749236f9f..b941732a2a720 100644 --- a/src/frontend/src/expr/type_inference/cast.rs +++ b/src/frontend/src/expr/type_inference/cast.rs @@ -111,11 +111,13 @@ pub fn align_array_and_element( pub fn cast_ok(source: &DataType, target: &DataType, allows: CastContext) -> bool { cast_ok_struct(source, target, allows) || cast_ok_array(source, target, allows) - || cast_ok_base(source.into(), target.into(), allows) + || cast_ok_base(source, target, allows) } -pub fn cast_ok_base(source: DataTypeName, target: DataTypeName, allows: CastContext) -> bool { - matches!(CAST_MAP.get(&(source, target)), Some(context) if *context <= allows) +/// Checks whether casting from `source` to `target` is ok in `allows` context. 
+/// Both `source` and `target` must be base types, i.e. not struct or array. +pub fn cast_ok_base(source: &DataType, target: &DataType, allows: CastContext) -> bool { + matches!(CAST_MAP.get(&(source.into(), target.into())), Some(context) if *context <= allows) } fn cast_ok_struct(source: &DataType, target: &DataType, allows: CastContext) -> bool { diff --git a/src/frontend/src/expr/type_inference/func.rs b/src/frontend/src/expr/type_inference/func.rs index 771d7a1c6406d..84e315dacae45 100644 --- a/src/frontend/src/expr/type_inference/func.rs +++ b/src/frontend/src/expr/type_inference/func.rs @@ -14,10 +14,10 @@ use itertools::Itertools as _; use num_integer::Integer as _; -use risingwave_common::error::{ErrorCode, Result, RwError}; -use risingwave_common::types::{DataType, DataTypeName, ScalarImpl, StructType}; +use risingwave_common::error::{ErrorCode, Result}; +use risingwave_common::types::{DataType, StructType}; use risingwave_common::util::iter_util::ZipEqFast; -pub use risingwave_expr::sig::func::*; +pub use risingwave_expr::sig::*; use super::{align_types, cast_ok_base, CastContext}; use crate::expr::type_inference::cast::align_array_and_element; @@ -27,7 +27,7 @@ use crate::expr::{cast_ok, is_row_function, Expr as _, ExprImpl, ExprType, Funct /// is not supported on backend. /// /// It also mutates the `inputs` by adding necessary casts. 
-pub fn infer_type(func_type: ExprType, inputs: &mut Vec) -> Result { +pub fn infer_type(func_type: ExprType, inputs: &mut [ExprImpl]) -> Result { if let Some(res) = infer_type_for_special(func_type, inputs).transpose() { return res; } @@ -36,30 +36,28 @@ pub fn infer_type(func_type: ExprType, inputs: &mut Vec) -> Result None, - false => Some(e.return_type().into()), + false => Some(e.return_type()), }) .collect_vec(); - let sig = infer_type_name(&FUNC_SIG_MAP, func_type, &actuals)?; - let inputs_owned = std::mem::take(inputs); - *inputs = inputs_owned - .into_iter() - .zip_eq_fast(sig.inputs_type) - .map(|(expr, t)| { - if expr.is_untyped() || DataTypeName::from(expr.return_type()) != *t { - if t.is_scalar() { - return expr.cast_implicit((*t).into()).map_err(Into::into); - } else { - return Err(ErrorCode::BindError(format!( - "Cannot implicitly cast '{:?}' to polymorphic type {:?}", - &expr, t - )) - .into()); - } + let sig = infer_type_name(&FUNCTION_REGISTRY, func_type, &actuals)?; + + // add implicit casts to inputs + for (expr, t) in inputs.iter_mut().zip_eq_fast(&sig.inputs_type) { + if expr.is_untyped() || !t.matches(&expr.return_type()) { + if let SigDataType::Exact(t) = t { + expr.cast_implicit_mut(t.clone())?; + } else { + return Err(ErrorCode::BindError(format!( + "Cannot implicitly cast '{expr:?}' to polymorphic type {t:?}", + )) + .into()); } - Ok(expr) - }) - .try_collect::<_, _, RwError>()?; - Ok(sig.ret_type.into()) + } + } + + let input_types = inputs.iter().map(|expr| expr.return_type()).collect_vec(); + let return_type = (sig.type_infer)(&input_types)?; + Ok(return_type) } pub fn infer_some_all( @@ -69,7 +67,7 @@ pub fn infer_some_all( let element_type = if inputs[1].is_untyped() { None } else if let DataType::List(datatype) = inputs[1].return_type() { - Some(DataTypeName::from(*datatype)) + Some(*datatype) } else { return Err(ErrorCode::BindError( "op SOME/ANY/ALL (array) requires array on right side".to_string(), @@ -79,37 +77,38 @@ pub fn 
infer_some_all( let final_type = func_types.pop().unwrap(); let actuals = vec![ - (!inputs[0].is_untyped()).then_some(inputs[0].return_type().into()), - element_type, + (!inputs[0].is_untyped()).then_some(inputs[0].return_type()), + element_type.clone(), ]; - let sig = infer_type_name(&FUNC_SIG_MAP, final_type, &actuals)?; - if DataTypeName::from(inputs[0].return_type()) != sig.inputs_type[0] { - if matches!( - sig.inputs_type[0], - DataTypeName::List | DataTypeName::Struct - ) { + let sig = infer_type_name(&FUNCTION_REGISTRY, final_type, &actuals)?; + if sig.ret_type != DataType::Boolean.into() { + return Err(ErrorCode::BindError(format!( + "op SOME/ANY/ALL (array) requires operator to yield boolean, but got {}", + sig.ret_type + )) + .into()); + } + if !sig.inputs_type[0].matches(&inputs[0].return_type()) { + let SigDataType::Exact(t) = &sig.inputs_type[0] else { return Err(ErrorCode::BindError( "array of array/struct on right are not supported yet".into(), ) .into()); - } - inputs[0].cast_implicit_mut(sig.inputs_type[0].into())?; + }; + inputs[0].cast_implicit_mut(t.clone())?; } - if element_type != Some(sig.inputs_type[1]) { - if matches!( - sig.inputs_type[1], - DataTypeName::List | DataTypeName::Struct - ) { + if !matches!(&element_type, Some(e) if sig.inputs_type[1].matches(e)) { + let SigDataType::Exact(t) = &sig.inputs_type[1] else { return Err( ErrorCode::BindError("array/struct on left are not supported yet".into()).into(), ); - } - inputs[1].cast_implicit_mut(DataType::List(Box::new(sig.inputs_type[1].into())))?; + }; + inputs[1].cast_implicit_mut(DataType::List(Box::new(t.clone())))?; } let inputs_owned = std::mem::take(inputs); let mut func_call = - FunctionCall::new_unchecked(final_type, inputs_owned, sig.ret_type.into()).into(); + FunctionCall::new_unchecked(final_type, inputs_owned, DataType::Boolean).into(); while let Some(func_type) = func_types.pop() { func_call = FunctionCall::new(func_type, vec![func_call])?.into(); } @@ -273,17 +272,17 @@ fn 
infer_struct_cast_target_type( (NestedType::Infer(l), NestedType::Infer(r)) => { // Both sides are *unknown*, using the sig_map to infer the return type. let actuals = vec![None, None]; - let sig = infer_type_name(&FUNC_SIG_MAP, func_type, &actuals)?; + let sig = infer_type_name(&FUNCTION_REGISTRY, func_type, &actuals)?; Ok(( sig.ret_type != l.into(), sig.ret_type != r.into(), - sig.ret_type.into(), + sig.ret_type.as_exact().clone(), )) } } } -/// Special exprs that cannot be handled by [`infer_type_name`] and [`FuncSigMap`] are handled here. +/// Special exprs that cannot be handled by [`infer_type_name`] and [`FunctionRegistry`] are handled here. /// These include variadic functions, list and struct type, as well as non-implicit cast. /// /// We should aim for enhancing the general inferring framework and reduce the special cases here. @@ -294,7 +293,7 @@ fn infer_struct_cast_target_type( /// * `Ok(None)` when no special rule matches and it should try general rules later fn infer_type_for_special( func_type: ExprType, - inputs: &mut Vec, + inputs: &mut [ExprImpl], ) -> Result> { match func_type { ExprType::Case => { @@ -321,40 +320,31 @@ fn infer_type_for_special( } ExprType::ConcatWs => { ensure_arity!("concat_ws", 2 <= | inputs |); - let inputs_owned = std::mem::take(inputs); - *inputs = inputs_owned - .into_iter() - .enumerate() - .map(|(i, input)| match i { - // 0-th arg must be string - 0 => input.cast_implicit(DataType::Varchar).map_err(Into::into), - // subsequent can be any type, using the output format - _ => input.cast_output(), - }) - .try_collect()?; + // 0-th arg must be string + inputs[0].cast_implicit_mut(DataType::Varchar)?; + for input in inputs.iter_mut().skip(1) { + // subsequent can be any type, using the output format + let owned = input.take(); + *input = owned.cast_output()?; + } Ok(Some(DataType::Varchar)) } ExprType::ConcatOp => { - let inputs_owned = std::mem::take(inputs); - *inputs = inputs_owned - .into_iter() - .map(|input| 
input.cast_explicit(DataType::Varchar)) - .try_collect()?; - Ok(Some(DataType::Varchar)) - } - ExprType::IsNotNull => { - ensure_arity!("is_not_null", | inputs | == 1); - match inputs[0].return_type() { - DataType::Struct(_) | DataType::List { .. } => Ok(Some(DataType::Boolean)), - _ => Ok(None), + for input in inputs { + input.cast_explicit_mut(DataType::Varchar)?; } + Ok(Some(DataType::Varchar)) } - ExprType::IsNull => { - ensure_arity!("is_null", | inputs | == 1); - match inputs[0].return_type() { - DataType::Struct(_) | DataType::List { .. } => Ok(Some(DataType::Boolean)), - _ => Ok(None), + ExprType::Format => { + ensure_arity!("format", 1 <= | inputs |); + // 0-th arg must be string + inputs[0].cast_implicit_mut(DataType::Varchar)?; + for input in inputs.iter_mut().skip(1) { + // subsequent can be any type, using the output format + let owned = input.take(); + *input = owned.cast_output()?; } + Ok(Some(DataType::Varchar)) } ExprType::Equal | ExprType::NotEqual @@ -416,62 +406,6 @@ fn infer_type_for_special( .into()) } } - ExprType::RegexpMatch => { - ensure_arity!("regexp_match", 2 <= | inputs | <= 3); - if inputs.len() == 3 { - match &inputs[2] { - ExprImpl::Literal(flag) => { - match flag.get_data() { - Some(flag) => { - let ScalarImpl::Utf8(flag) = flag else { - return Err(ErrorCode::BindError( - "flag in regexp_match must be a literal string".to_string(), - ) - .into()); - }; - for c in flag.chars() { - if c == 'g' { - return Err(ErrorCode::InvalidInputSyntax( - "regexp_match() does not support the \"global\" option. Use the regexp_matches function instead." - .to_string(), - ) - .into()); - } - if !"ic".contains(c) { - return Err(ErrorCode::NotImplemented( - format!("invalid regular expression option: \"{c}\""), - None.into(), - ) - .into()); - } - } - } - None => { - // flag is NULL. Will return NULL. 
- } - } - } - _ => { - return Err(ErrorCode::BindError( - "flag in regexp_match must be a literal string".to_string(), - ) - .into()) - } - } - } - Ok(Some(DataType::List(Box::new(DataType::Varchar)))) - } - ExprType::RegexpReplace => { - // regexp_replace(source, pattern, replacement [, start [, N ]] [, flags ]) - // TODO: Preprocessing? - ensure_arity!("regexp_replace", 3 <= | inputs | <= 6); - Ok(Some(DataType::Varchar)) - } - ExprType::RegexpCount => { - // TODO: Preprocessing? - ensure_arity!("regexp_count", 2 <= | inputs | <= 4); - Ok(Some(DataType::Int32)) - } ExprType::ArrayCat => { ensure_arity!("array_cat", | inputs | == 2); let left_type = (!inputs[0].is_untyped()).then(|| inputs[0].return_type()); @@ -483,11 +417,9 @@ fn infer_type_for_special( // when neither type is available, default to `varchar[]` // when one side is unknown and other side is list, use that list type let t = t.unwrap_or_else(|| DataType::List(DataType::Varchar.into())); - let inputs_owned = std::mem::take(inputs); - *inputs = inputs_owned - .into_iter() - .map(|e| e.cast_implicit(t.clone())) - .try_collect()?; + for input in &mut *inputs { + input.cast_implicit_mut(t.clone())?; + } Some(t) } (Some(DataType::List(_)), Some(DataType::List(_))) => { @@ -589,24 +521,6 @@ fn infer_type_for_special( .into()), } } - ExprType::ArrayDistinct => { - ensure_arity!("array_distinct", | inputs | == 1); - inputs[0].ensure_array_type()?; - - Ok(Some(inputs[0].return_type())) - } - ExprType::ArrayMin => { - ensure_arity!("array_min", | inputs | == 1); - inputs[0].ensure_array_type()?; - - Ok(Some(inputs[0].return_type().as_list().clone())) - } - ExprType::ArraySort => { - ensure_arity!("array_sort", | inputs | == 1); - inputs[0].ensure_array_type()?; - - Ok(Some(inputs[0].return_type())) - } ExprType::ArrayDims => { ensure_arity!("array_dims", | inputs | == 1); inputs[0].ensure_array_type()?; @@ -619,36 +533,13 @@ fn infer_type_for_special( } Ok(Some(DataType::Varchar)) } - ExprType::ArrayMax => { - 
ensure_arity!("array_max", | inputs | == 1); - inputs[0].ensure_array_type()?; - - Ok(Some(inputs[0].return_type().as_list().clone())) - } - ExprType::StringToArray => { - ensure_arity!("string_to_array", 2 <= | inputs | <= 3); - - if !inputs.iter().all(|e| e.return_type() == DataType::Varchar) { - return Ok(None); - } - - Ok(Some(DataType::List(Box::new(DataType::Varchar)))) - } - ExprType::TrimArray => { - ensure_arity!("trim_array", | inputs | == 2); - inputs[0].ensure_array_type()?; - - inputs[1].cast_implicit_mut(DataType::Int32)?; - - Ok(Some(inputs[0].return_type())) - } ExprType::Vnode => { ensure_arity!("vnode", 1 <= | inputs |); Ok(Some(DataType::Int16)) } - ExprType::Proctime => { - ensure_arity!("proctime", | inputs | == 0); - Ok(Some(DataType::Timestamptz)) + ExprType::Greatest | ExprType::Least => { + ensure_arity!("greatest/least", 1 <= | inputs |); + Ok(Some(align_types(inputs.iter_mut())?)) } _ => Ok(None), } @@ -674,22 +565,24 @@ fn infer_type_for_special( /// 5. Attempt to narrow down candidates by assuming all arguments are same type. This covers Rule /// 4f in `PostgreSQL`. See [`narrow_same_type`] for details. fn infer_type_name<'a>( - sig_map: &'a FuncSigMap, + sig_map: &'a FunctionRegistry, func_type: ExprType, - inputs: &[Option], + inputs: &[Option], ) -> Result<&'a FuncSign> { let candidates = sig_map.get_with_arg_nums(func_type, inputs.len()); // Binary operators have a special `unknown` handling rule for exact match. We do not // distinguish operators from functions as of now. 
if inputs.len() == 2 { - let t = match (inputs[0], inputs[1]) { + let t = match (&inputs[0], &inputs[1]) { (None, t) => Ok(t), (t, None) => Ok(t), (Some(_), Some(_)) => Err(()), }; if let Ok(Some(t)) = t { - let exact = candidates.iter().find(|sig| sig.inputs_type == [t, t]); + let exact = candidates + .iter() + .find(|sig| sig.inputs_type[0].matches(t) && sig.inputs_type[1].matches(t)); if let Some(sig) = exact { return Ok(sig); } @@ -731,11 +624,11 @@ fn infer_type_name<'a>( /// Checks if `t` is a preferred type in any type category, as defined by `PostgreSQL`: /// . -fn is_preferred(t: DataTypeName) -> bool { - use DataTypeName as T; +fn is_preferred(t: &SigDataType) -> bool { + use DataType as T; matches!( t, - T::Float64 | T::Boolean | T::Varchar | T::Timestamptz | T::Interval + SigDataType::Exact(T::Float64 | T::Boolean | T::Varchar | T::Timestamptz | T::Interval) ) } @@ -745,8 +638,9 @@ fn is_preferred(t: DataTypeName) -> bool { /// /// Sometimes it is more convenient to include equality when checking whether a formal parameter can /// accept an actual argument. So we introduced `eq_ok` to control this behavior. -fn implicit_ok(source: DataTypeName, target: DataTypeName, eq_ok: bool) -> bool { - eq_ok && source == target || cast_ok_base(source, target, CastContext::Implicit) +fn implicit_ok(source: &DataType, target: &SigDataType, eq_ok: bool) -> bool { + eq_ok && target.matches(source) + || target.is_exact() && cast_ok_base(source, target.as_exact(), CastContext::Implicit) } /// Find the top `candidates` that match `inputs` on most non-null positions. 
This covers Rule 2, @@ -775,10 +669,7 @@ fn implicit_ok(source: DataTypeName, target: DataTypeName, eq_ok: bool) -> bool /// [rule 4a src]: https://github.com/postgres/postgres/blob/86a4dc1e6f29d1992a2afa3fac1a0b0a6e84568c/src/backend/parser/parse_func.c#L907-L947 /// [rule 4c src]: https://github.com/postgres/postgres/blob/86a4dc1e6f29d1992a2afa3fac1a0b0a6e84568c/src/backend/parser/parse_func.c#L1062-L1104 /// [rule 4d src]: https://github.com/postgres/postgres/blob/86a4dc1e6f29d1992a2afa3fac1a0b0a6e84568c/src/backend/parser/parse_func.c#L1106-L1153 -fn top_matches<'a>( - candidates: &[&'a FuncSign], - inputs: &[Option], -) -> Vec<&'a FuncSign> { +fn top_matches<'a>(candidates: &[&'a FuncSign], inputs: &[Option]) -> Vec<&'a FuncSign> { let mut best_exact = 0; let mut best_preferred = 0; let mut best_candidates = Vec::new(); @@ -789,13 +680,13 @@ fn top_matches<'a>( let mut castable = true; for (formal, actual) in sig.inputs_type.iter().zip_eq_fast(inputs) { let Some(actual) = actual else { continue }; - if formal == actual { + if formal.matches(actual) { n_exact += 1; - } else if !implicit_ok(*actual, *formal, false) { + } else if !implicit_ok(actual, formal, false) { castable = false; break; } - if is_preferred(*formal) { + if is_preferred(formal) { n_preferred += 1; } } @@ -837,9 +728,9 @@ fn top_matches<'a>( /// [rule 4e src]: https://github.com/postgres/postgres/blob/86a4dc1e6f29d1992a2afa3fac1a0b0a6e84568c/src/backend/parser/parse_func.c#L1164-L1298 fn narrow_category<'a>( candidates: Vec<&'a FuncSign>, - inputs: &[Option], + inputs: &[Option], ) -> Vec<&'a FuncSign> { - const BIASED_TYPE: DataTypeName = DataTypeName::Varchar; + const BIASED_TYPE: SigDataType = SigDataType::Exact(DataType::Varchar); let Ok(categories) = inputs .iter() .enumerate() @@ -856,21 +747,21 @@ fn narrow_category<'a>( if actual.is_some() { return Ok(None); } - let mut category = Ok(candidates[0].inputs_type[i]); + let mut category = Ok(&candidates[0].inputs_type[i]); for sig in 
&candidates[1..] { - let formal = sig.inputs_type[i]; - if formal == BIASED_TYPE || category == Ok(BIASED_TYPE) { - category = Ok(BIASED_TYPE); + let formal = &sig.inputs_type[i]; + if formal == &BIASED_TYPE || category == Ok(&BIASED_TYPE) { + category = Ok(&BIASED_TYPE); break; } // formal != BIASED_TYPE && category.is_err(): // - Category conflict err can only be solved by a later varchar. Skip this // candidate. - let Ok(selected) = category else { continue }; + let Ok(selected) = &category else { continue }; // least_restrictive or mark temporary conflict err - if implicit_ok(formal, selected, true) { + if formal.is_exact() && implicit_ok(formal.as_exact(), selected, true) { // noop - } else if implicit_ok(selected, formal, false) { + } else if selected.is_exact() && implicit_ok(selected.as_exact(), formal, false) { category = Ok(formal); } else { category = Err(()); @@ -895,8 +786,10 @@ fn narrow_category<'a>( let Some(selected) = category else { return true; }; - *formal == *selected - || !is_preferred(*selected) && implicit_ok(*formal, *selected, false) + formal == *selected + || !is_preferred(selected) + && formal.is_exact() + && implicit_ok(formal.as_exact(), selected, false) }) }) .copied() @@ -928,12 +821,12 @@ fn narrow_category<'a>( /// [Rule 2]: https://www.postgresql.org/docs/current/typeconv-oper.html#:~:text=then%20assume%20it%20is%20the%20same%20type%20as%20the%20other%20argument%20for%20this%20check fn narrow_same_type<'a>( candidates: Vec<&'a FuncSign>, - inputs: &[Option], + inputs: &[Option], ) -> Vec<&'a FuncSign> { let Ok(Some(same_type)) = inputs.iter().try_fold(None, |acc, cur| match (acc, cur) { - (None, t) => Ok(*t), + (None, t) => Ok(t.as_ref()), (t, None) => Ok(t), - (Some(l), Some(r)) if l == *r => Ok(Some(l)), + (Some(l), Some(r)) if l == r => Ok(Some(l)), _ => Err(()), }) else { return candidates; @@ -943,7 +836,7 @@ fn narrow_same_type<'a>( .filter(|sig| { sig.inputs_type .iter() - .all(|formal| implicit_ok(same_type, *formal, 
true)) + .all(|formal| implicit_ok(same_type, formal, true)) }) .copied() .collect_vec(); @@ -953,7 +846,7 @@ fn narrow_same_type<'a>( } } -struct TypeDebug<'a>(&'a Option); +struct TypeDebug<'a>(&'a Option); impl<'a> std::fmt::Debug for TypeDebug<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { @@ -988,7 +881,7 @@ mod tests { ) .into() }) - .collect(); + .collect_vec(); infer_type(func_type, &mut inputs) } @@ -1129,10 +1022,7 @@ mod tests { #[test] fn test_match_implicit() { - use DataTypeName as T; - // func_name and ret_type does not affect the overload resolution logic - const DUMMY_FUNC: ExprType = ExprType::Add; - const DUMMY_RET: T = T::Int32; + use DataType as T; let testcases = [ ( "Binary special rule prefers arguments of same type.", @@ -1234,20 +1124,28 @@ mod tests { ), ]; for (desc, candidates, inputs, expected) in testcases { - let mut sig_map = FuncSigMap::default(); + let mut sig_map = FunctionRegistry::default(); for formals in candidates { sig_map.insert(FuncSign { - func: DUMMY_FUNC, - inputs_type: formals, - ret_type: DUMMY_RET, - build: |_, _| unreachable!(), + // func_name does not affect the overload resolution logic + name: ExprType::Add.into(), + inputs_type: formals.iter().map(|t| t.clone().into()).collect(), + variadic: false, + // ret_type does not affect the overload resolution logic + ret_type: T::Int32.into(), + build: FuncBuilder::Scalar(|_, _| unreachable!()), + type_infer: |_| unreachable!(), deprecated: false, + state_type: None, + append_only: false, }); } - let result = infer_type_name(&sig_map, DUMMY_FUNC, inputs); + let result = infer_type_name(&sig_map, ExprType::Add, inputs); match (expected, result) { (Ok(expected), Ok(found)) => { - assert_eq!(expected, found.inputs_type, "case `{}`", desc) + if !found.match_args(expected) { + panic!("case `{}` expect {:?} != found {:?}", desc, expected, found) + } } (Ok(_), Err(err)) => panic!("case `{}` unexpected error: {:?}", desc, err), 
(Err(_), Ok(f)) => panic!( diff --git a/src/frontend/src/expr/type_inference/mod.rs b/src/frontend/src/expr/type_inference/mod.rs index 8135787ea7cdd..08007b14a2751 100644 --- a/src/frontend/src/expr/type_inference/mod.rs +++ b/src/frontend/src/expr/type_inference/mod.rs @@ -21,5 +21,4 @@ pub use cast::{ align_types, cast_map_array, cast_ok, cast_ok_base, cast_sigs, least_restrictive, CastContext, CastSig, }; -pub use func::{func_sigs, infer_some_all, infer_type, FuncSign}; -pub use risingwave_expr::sig::agg::{agg_func_sigs, AggFuncSig}; +pub use func::{infer_some_all, infer_type, FuncSign}; diff --git a/src/frontend/src/expr/utils.rs b/src/frontend/src/expr/utils.rs index d07287b08dbe2..39064d1680359 100644 --- a/src/frontend/src/expr/utils.rs +++ b/src/frontend/src/expr/utils.rs @@ -353,7 +353,9 @@ pub struct CollectInputRef { input_bits: FixedBitSet, } -impl ExprVisitor<()> for CollectInputRef { +impl ExprVisitor for CollectInputRef { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_input_ref(&mut self, expr: &InputRef) { @@ -408,7 +410,9 @@ pub fn collect_input_refs<'a>( #[derive(Clone, Default)] pub struct CountNow {} -impl ExprVisitor for CountNow { +impl ExprVisitor for CountNow { + type Result = usize; + fn merge(a: usize, b: usize) -> usize { a + b } diff --git a/src/frontend/src/expr/window_function.rs b/src/frontend/src/expr/window_function.rs index 62f961515cdd0..371a00dc6b62a 100644 --- a/src/frontend/src/expr/window_function.rs +++ b/src/frontend/src/expr/window_function.rs @@ -15,7 +15,7 @@ use itertools::Itertools; use risingwave_common::error::{ErrorCode, RwError}; use risingwave_common::types::DataType; -use risingwave_expr::function::window::{Frame, WindowFuncKind}; +use risingwave_expr::window_function::{Frame, WindowFuncKind}; use super::{AggCall, Expr, ExprImpl, OrderBy, RwResult}; diff --git a/src/frontend/src/handler/alter_source_column.rs b/src/frontend/src/handler/alter_source_column.rs index be139fd6d6976..385a1010b50c9 100644 --- 
a/src/frontend/src/handler/alter_source_column.rs +++ b/src/frontend/src/handler/alter_source_column.rs @@ -16,8 +16,7 @@ use itertools::Itertools; use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_common::catalog::ColumnId; use risingwave_common::error::{ErrorCode, Result, RwError}; -use risingwave_connector::source::{SourceEncode, SourceStruct}; -use risingwave_source::source_desc::extract_source_struct; +use risingwave_connector::source::{extract_source_struct, SourceEncode, SourceStruct}; use risingwave_sqlparser::ast::{ AlterSourceOperation, ColumnDef, CreateSourceStatement, ObjectName, Statement, }; @@ -68,6 +67,12 @@ pub async fn handle_alter_source_column( None.into(), ))); } + SourceEncode::Json if catalog.info.use_schema_registry => { + return Err(RwError::from(ErrorCode::NotImplemented( + "Alter source with schema registry".into(), + None.into(), + ))); + } SourceEncode::Invalid | SourceEncode::Native => { return Err(RwError::from(ErrorCode::NotSupported( format!("Alter source with encode {:?}", encode), diff --git a/src/frontend/src/handler/alter_table_column.rs b/src/frontend/src/handler/alter_table_column.rs index be314befdfae3..1a6d02b963e9d 100644 --- a/src/frontend/src/handler/alter_table_column.rs +++ b/src/frontend/src/handler/alter_table_column.rs @@ -17,16 +17,21 @@ use itertools::Itertools; use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::util::column_index_mapping::ColIndexMapping; +use risingwave_pb::catalog::table::OptionalAssociatedSourceId; use risingwave_pb::catalog::Table; use risingwave_pb::stream_plan::stream_fragment_graph::Parallelism; use risingwave_pb::stream_plan::StreamFragmentGraph; -use risingwave_sqlparser::ast::{AlterTableOperation, ColumnOption, ObjectName, Statement}; +use risingwave_sqlparser::ast::{ + AlterTableOperation, ColumnOption, Encode, ObjectName, SourceSchemaV2, Statement, +}; use 
risingwave_sqlparser::parser::Parser; +use super::create_source::get_json_schema_location; use super::create_table::{gen_create_table_plan, ColumnIdGenerator}; use super::{HandlerArgs, RwPgResponse}; use crate::catalog::root_catalog::SchemaPath; use crate::catalog::table_catalog::TableType; +use crate::handler::create_table::gen_create_table_plan_with_source; use crate::{build_graph, Binder, OptimizerContext, TableCatalog}; /// Handle `ALTER TABLE [ADD|DROP] COLUMN` statements. The `operation` must be either `AddColumn` or @@ -51,13 +56,6 @@ pub async fn handle_alter_table_column( reader.get_table_by_name(db_name, schema_path, &real_table_name)?; match table.table_type() { - // Do not allow altering a table with a connector. It should be done passively according - // to the messages from the connector. - TableType::Table if table.has_associated_source() => { - Err(ErrorCode::InvalidInputSyntax(format!( - "cannot alter table \"{table_name}\" because it has a connector" - )))? - } TableType::Table => {} _ => Err(ErrorCode::InvalidInputSyntax(format!( @@ -82,9 +80,26 @@ pub async fn handle_alter_table_column( .context("unable to parse original table definition")? .try_into() .unwrap(); - let Statement::CreateTable { columns, .. } = &mut definition else { + let Statement::CreateTable { + columns, + source_schema, + .. 
+ } = &mut definition + else { panic!("unexpected statement: {:?}", definition); }; + let source_schema = source_schema + .clone() + .map(|source_schema| source_schema.into_source_schema_v2().0); + + if let Some(source_schema) = &source_schema { + if schema_has_schema_registry(source_schema) { + return Err(RwError::from(ErrorCode::NotImplemented( + "Alter table with source having schema registry".into(), + None.into(), + ))); + } + } match operation { AlterTableOperation::AddColumn { @@ -170,20 +185,32 @@ pub async fn handle_alter_table_column( panic!("unexpected statement type: {:?}", definition); }; - let (graph, table) = { + let (graph, table, source) = { let context = OptimizerContext::from_handler_args(handler_args); - let (plan, source, table) = gen_create_table_plan( - context, - table_name, - columns, - constraints, - col_id_gen, - source_watermarks, - append_only, - )?; - - // We should already have rejected the case where the table has a connector. - assert!(source.is_none()); + let (plan, source, table) = match source_schema { + Some(source_schema) => { + gen_create_table_plan_with_source( + context, + table_name, + columns, + constraints, + source_schema, + source_watermarks, + col_id_gen, + append_only, + ) + .await? + } + None => gen_create_table_plan( + context, + table_name, + columns, + constraints, + col_id_gen, + source_watermarks, + append_only, + )?, + }; // TODO: avoid this backward conversion. if TableCatalog::from(&table).pk_column_ids() != original_catalog.pk_column_ids() { @@ -203,10 +230,13 @@ pub async fn handle_alter_table_column( // Fill the original table ID. let table = Table { id: original_catalog.id().table_id(), + optional_associated_source_id: original_catalog + .associated_source_id() + .map(|source_id| OptionalAssociatedSourceId::AssociatedSourceId(source_id.into())), ..table }; - (graph, table) + (graph, table, source) }; // Calculate the mapping from the original columns to the new columns. 
@@ -226,12 +256,23 @@ pub async fn handle_alter_table_column( let catalog_writer = session.catalog_writer()?; catalog_writer - .replace_table(table, graph, col_index_mapping) + .replace_table(source, table, graph, col_index_mapping) .await?; Ok(PgResponse::empty_result(StatementType::ALTER_TABLE)) } +fn schema_has_schema_registry(schema: &SourceSchemaV2) -> bool { + match schema.row_encode { + Encode::Avro | Encode::Protobuf => true, + Encode::Json => { + let mut options = schema.gen_options().unwrap(); + matches!(get_json_schema_location(&mut options), Ok(Some(_))) + } + _ => false, + } +} + #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/src/frontend/src/handler/create_index.rs b/src/frontend/src/handler/create_index.rs index ad4512aca354a..a5a002d3b3d79 100644 --- a/src/frontend/src/handler/create_index.rs +++ b/src/frontend/src/handler/create_index.rs @@ -21,7 +21,7 @@ use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_common::catalog::{IndexId, TableDesc, TableId}; use risingwave_common::error::{ErrorCode, Result}; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_pb::catalog::{PbIndex, PbTable}; +use risingwave_pb::catalog::{PbIndex, PbStreamJobStatus, PbTable}; use risingwave_pb::stream_plan::stream_fragment_graph::Parallelism; use risingwave_pb::user::grant_privilege::{Action, Object}; use risingwave_sqlparser::ast; @@ -242,6 +242,7 @@ pub(crate) fn gen_create_index_plan( original_columns, initialized_at_epoch: None, created_at_epoch: None, + stream_job_status: PbStreamJobStatus::Creating.into(), }; let plan: PlanRef = materialize.into(); diff --git a/src/frontend/src/handler/create_mv.rs b/src/frontend/src/handler/create_mv.rs index cc08064700aff..053ba5aa30f19 100644 --- a/src/frontend/src/handler/create_mv.rs +++ b/src/frontend/src/handler/create_mv.rs @@ -15,7 +15,7 @@ use itertools::Itertools; use pgwire::pg_response::{PgResponse, StatementType}; use 
risingwave_common::error::{ErrorCode, Result}; -use risingwave_pb::catalog::PbTable; +use risingwave_pb::catalog::{CreateType, PbTable}; use risingwave_pb::stream_plan::stream_fragment_graph::Parallelism; use risingwave_pb::user::grant_privilege::Action; use risingwave_sqlparser::ast::{EmitMode, Ident, ObjectName, Query}; @@ -26,6 +26,7 @@ use crate::binder::{Binder, BoundQuery, BoundSetExpr}; use crate::catalog::{check_valid_column_name, CatalogError}; use crate::handler::privilege::resolve_query_privileges; use crate::handler::HandlerArgs; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::Explain; use crate::optimizer::{OptimizerContext, OptimizerContextRef, PlanRef, RelationCollectorVisitor}; use crate::planner::Planner; @@ -163,7 +164,7 @@ pub async fn handle_create_mv( Ok(_) => {} }; - let (table, graph) = { + let (mut table, graph) = { let context = OptimizerContext::from_handler_args(handler_args); let has_order_by = !query.order_by.is_empty(); @@ -175,7 +176,7 @@ It only indicates the physical clustering of the data, which may improve the per let (plan, table) = gen_create_mv_plan(&session, context.into(), query, name, columns, emit_mode)?; - let context = plan.plan_base().ctx.clone(); + let context = plan.plan_base().ctx().clone(); let mut graph = build_graph(plan); graph.parallelism = session .config() @@ -188,6 +189,7 @@ It only indicates the physical clustering of the data, which may improve the per (table, graph) }; + // Ensure writes to `StreamJobTracker` are atomic. 
let _job_guard = session .env() @@ -199,6 +201,15 @@ It only indicates the physical clustering of the data, which may improve the per table.name.clone(), )); + let run_in_background = session.config().get_background_ddl(); + let create_type = if run_in_background { + CreateType::Background + } else { + CreateType::Foreground + }; + table.create_type = create_type.into(); + + let session = session.clone(); let catalog_writer = session.catalog_writer()?; catalog_writer .create_materialized_view(table, graph) diff --git a/src/frontend/src/handler/create_sink.rs b/src/frontend/src/handler/create_sink.rs index 3a8c701d77432..ddb1d697b856d 100644 --- a/src/frontend/src/handler/create_sink.rs +++ b/src/frontend/src/handler/create_sink.rs @@ -12,17 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::collections::HashMap; use std::rc::Rc; +use std::sync::LazyLock; use itertools::Itertools; +use maplit::{convert_args, hashmap}; use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_common::catalog::{ConnectionId, DatabaseId, SchemaId, UserId}; -use risingwave_common::error::Result; -use risingwave_connector::sink::catalog::SinkCatalog; +use risingwave_common::error::{ErrorCode, Result}; +use risingwave_connector::sink::catalog::{SinkCatalog, SinkFormatDesc}; +use risingwave_connector::sink::{ + CONNECTOR_TYPE_KEY, SINK_TYPE_OPTION, SINK_USER_FORCE_APPEND_ONLY_OPTION, +}; use risingwave_pb::stream_plan::stream_fragment_graph::Parallelism; use risingwave_sqlparser::ast::{ - CreateSink, CreateSinkStatement, EmitMode, ObjectName, Query, Select, SelectItem, SetExpr, - TableFactor, TableWithJoins, + CreateSink, CreateSinkStatement, EmitMode, Encode, Format, ObjectName, Query, Select, + SelectItem, SetExpr, SinkSchema, TableFactor, TableWithJoins, }; use super::create_mv::get_column_names; @@ -35,8 +41,8 @@ use crate::optimizer::{OptimizerContext, OptimizerContextRef, PlanRef, RelationC use 
crate::scheduler::streaming_manager::CreatingStreamingJobInfo; use crate::session::SessionImpl; use crate::stream_fragmenter::build_graph; -use crate::utils::resolve_connection_in_with_option; -use crate::Planner; +use crate::utils::resolve_privatelink_in_with_option; +use crate::{Planner, WithOptions}; pub fn gen_sink_query_from_name(from_name: ObjectName) -> Result { let table_factor = TableFactor::Table { @@ -106,7 +112,7 @@ pub fn gen_sink_plan( let mut with_options = context.with_options().clone(); let connection_id = { let conn_id = - resolve_connection_in_with_option(&mut with_options, &sink_schema_name, session)?; + resolve_privatelink_in_with_option(&mut with_options, &sink_schema_name, session)?; conn_id.map(ConnectionId) }; @@ -115,6 +121,29 @@ pub fn gen_sink_plan( context.warn_to_user("EMIT ON WINDOW CLOSE is currently an experimental feature. Please use it with caution."); } + let connector = with_options + .get(CONNECTOR_TYPE_KEY) + .ok_or_else(|| ErrorCode::BindError(format!("missing field '{CONNECTOR_TYPE_KEY}'")))?; + let format_desc = match stmt.sink_schema { + // Case A: new syntax `format ... encode ...` + Some(f) => { + validate_compatibility(connector, &f)?; + Some(bind_sink_format_desc(f)?) + }, + None => match with_options.get(SINK_TYPE_OPTION) { + // Case B: old syntax `type = '...'` + Some(t) => SinkFormatDesc::from_legacy_type(connector, t)?.map(|mut f| { + session.notice_to_user("Consider using the newer syntax `FORMAT ... 
ENCODE ...` instead of `type = '...'`."); + if let Some(v) = with_options.get(SINK_USER_FORCE_APPEND_ONLY_OPTION) { + f.options.insert(SINK_USER_FORCE_APPEND_ONLY_OPTION.into(), v.into()); + } + f + }), + // Case C: no format + encode required + None => None, + }, + }; + let mut plan_root = Planner::new(context).plan_query(bound)?; if let Some(col_names) = col_names { plan_root.set_out_names(col_names)?; @@ -127,6 +156,7 @@ pub fn gen_sink_plan( emit_on_window_close, db_name.to_owned(), sink_from_table_name, + format_desc, )?; let sink_desc = sink_plan.sink_desc().clone(); let sink_plan: PlanRef = sink_plan.into(); @@ -195,6 +225,108 @@ pub async fn handle_create_sink( Ok(PgResponse::empty_result(StatementType::CREATE_SINK)) } +/// Transforms the (format, encode, options) from sqlparser AST into an internal struct `SinkFormatDesc`. +/// This is an analogy to (part of) [`crate::handler::create_source::try_bind_columns_from_source`] +/// which transforms sqlparser AST `SourceSchemaV2` into `StreamSourceInfo`. 
+fn bind_sink_format_desc(value: SinkSchema) -> Result { + use risingwave_connector::sink::catalog::{SinkEncode, SinkFormat}; + use risingwave_sqlparser::ast::{Encode as E, Format as F}; + + let format = match value.format { + F::Plain => SinkFormat::AppendOnly, + F::Upsert => SinkFormat::Upsert, + F::Debezium => SinkFormat::Debezium, + f @ (F::Native | F::DebeziumMongo | F::Maxwell | F::Canal) => { + return Err(ErrorCode::BindError(format!("sink format unsupported: {f}")).into()) + } + }; + let encode = match value.row_encode { + E::Json => SinkEncode::Json, + E::Protobuf => SinkEncode::Protobuf, + E::Avro => SinkEncode::Avro, + E::Template => SinkEncode::Template, + e @ (E::Native | E::Csv | E::Bytes) => { + return Err(ErrorCode::BindError(format!("sink encode unsupported: {e}")).into()) + } + }; + let options = WithOptions::try_from(value.row_options.as_slice())?.into_inner(); + + Ok(SinkFormatDesc { + format, + encode, + options, + }) +} + +static CONNECTORS_COMPATIBLE_FORMATS: LazyLock>>> = + LazyLock::new(|| { + use risingwave_connector::sink::kafka::KafkaSink; + use risingwave_connector::sink::kinesis::KinesisSink; + use risingwave_connector::sink::pulsar::PulsarSink; + use risingwave_connector::sink::redis::RedisSink; + use risingwave_connector::sink::Sink as _; + + convert_args!(hashmap!( + KafkaSink::SINK_NAME => hashmap!( + Format::Plain => vec![Encode::Json, Encode::Protobuf], + Format::Upsert => vec![Encode::Json], + Format::Debezium => vec![Encode::Json], + ), + KinesisSink::SINK_NAME => hashmap!( + Format::Plain => vec![Encode::Json], + Format::Upsert => vec![Encode::Json], + Format::Debezium => vec![Encode::Json], + ), + PulsarSink::SINK_NAME => hashmap!( + Format::Plain => vec![Encode::Json], + Format::Upsert => vec![Encode::Json], + Format::Debezium => vec![Encode::Json], + ), + RedisSink::SINK_NAME => hashmap!( + Format::Plain => vec![Encode::Json,Encode::Template], + Format::Upsert => vec![Encode::Json,Encode::Template], + ), + )) + }); +pub fn 
validate_compatibility(connector: &str, format_desc: &SinkSchema) -> Result<()> { + let compatible_formats = CONNECTORS_COMPATIBLE_FORMATS + .get(connector) + .ok_or_else(|| { + ErrorCode::BindError(format!( + "connector {} is not supported by FORMAT ... ENCODE ... syntax", + connector + )) + })?; + let compatible_encodes = compatible_formats.get(&format_desc.format).ok_or_else(|| { + ErrorCode::BindError(format!( + "connector {} does not support format {:?}", + connector, format_desc.format + )) + })?; + if !compatible_encodes.contains(&format_desc.row_encode) { + return Err(ErrorCode::BindError(format!( + "connector {} does not support format {:?} with encode {:?}", + connector, format_desc.format, format_desc.row_encode + )) + .into()); + } + Ok(()) +} + +/// For `planner_test` crate so that it does not depend directly on `connector` crate just for `SinkFormatDesc`. +impl TryFrom<&WithOptions> for Option { + type Error = risingwave_connector::sink::SinkError; + + fn try_from(value: &WithOptions) -> std::result::Result { + let connector = value.get(CONNECTOR_TYPE_KEY); + let r#type = value.get(SINK_TYPE_OPTION); + match (connector, r#type) { + (Some(c), Some(t)) => SinkFormatDesc::from_legacy_type(c, t), + _ => Ok(None), + } + } +} + #[cfg(test)] pub mod tests { use risingwave_common::catalog::{DEFAULT_DATABASE_NAME, DEFAULT_SCHEMA_NAME}; @@ -218,7 +350,7 @@ pub mod tests { frontend.run_sql(sql).await.unwrap(); let sql = r#"CREATE SINK snk1 FROM mv1 - WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = + WITH (connector = 'jdbc', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '', type = 'append-only', force_append_only = 'true');"#.to_string(); frontend.run_sql(sql).await.unwrap(); diff --git a/src/frontend/src/handler/create_source.rs b/src/frontend/src/handler/create_source.rs index 78e85fca77e0c..6f7de61285cb0 100644 --- a/src/frontend/src/handler/create_source.rs +++ 
b/src/frontend/src/handler/create_source.rs @@ -27,18 +27,19 @@ use risingwave_common::error::ErrorCode::{self, InvalidInputSyntax, ProtocolErro use risingwave_common::error::{Result, RwError}; use risingwave_common::types::DataType; use risingwave_connector::parser::{ - name_strategy_from_str, schema_to_columns, AvroParserConfig, DebeziumAvroParserConfig, - ProtobufParserConfig, SpecificParserConfig, + schema_to_columns, AvroParserConfig, DebeziumAvroParserConfig, ProtobufParserConfig, + SpecificParserConfig, }; +use risingwave_connector::schema::schema_registry::name_strategy_from_str; use risingwave_connector::source::cdc::{ CITUS_CDC_CONNECTOR, MYSQL_CDC_CONNECTOR, POSTGRES_CDC_CONNECTOR, }; use risingwave_connector::source::datagen::DATAGEN_CONNECTOR; -use risingwave_connector::source::filesystem::S3_CONNECTOR; use risingwave_connector::source::nexmark::source::{get_event_data_types_with_names, EventType}; +use risingwave_connector::source::test_source::TEST_CONNECTOR; use risingwave_connector::source::{ - SourceEncode, SourceFormat, SourceStruct, GOOGLE_PUBSUB_CONNECTOR, KAFKA_CONNECTOR, - KINESIS_CONNECTOR, NATS_CONNECTOR, NEXMARK_CONNECTOR, PULSAR_CONNECTOR, + GOOGLE_PUBSUB_CONNECTOR, KAFKA_CONNECTOR, KINESIS_CONNECTOR, NATS_CONNECTOR, NEXMARK_CONNECTOR, + PULSAR_CONNECTOR, S3_CONNECTOR, S3_V2_CONNECTOR, }; use risingwave_pb::catalog::{ PbSchemaRegistryNameStrategy, PbSource, StreamSourceInfo, WatermarkDesc, @@ -60,7 +61,7 @@ use crate::handler::create_table::{ use crate::handler::util::{get_connector, is_kafka_connector}; use crate::handler::HandlerArgs; use crate::session::SessionImpl; -use crate::utils::resolve_connection_in_with_option; +use crate::utils::resolve_privatelink_in_with_option; use crate::{bind_data_type, WithOptions}; pub(crate) const UPSTREAM_SOURCE_KEY: &str = "connector"; @@ -98,11 +99,7 @@ async fn extract_avro_table_schema( info: &StreamSourceInfo, with_properties: &HashMap, ) -> Result> { - let parser_config = 
SpecificParserConfig::new( - SourceStruct::new(SourceFormat::Plain, SourceEncode::Avro), - info, - with_properties, - )?; + let parser_config = SpecificParserConfig::new(info, with_properties)?; let conf = AvroParserConfig::new(parser_config.encoding_config).await?; let vec_column_desc = conf.map_to_columns()?; Ok(vec_column_desc @@ -119,11 +116,7 @@ async fn extract_upsert_avro_table_schema( info: &StreamSourceInfo, with_properties: &HashMap, ) -> Result<(Vec, Vec)> { - let parser_config = SpecificParserConfig::new( - SourceStruct::new(SourceFormat::Upsert, SourceEncode::Avro), - info, - with_properties, - )?; + let parser_config = SpecificParserConfig::new(info, with_properties)?; let conf = AvroParserConfig::new(parser_config.encoding_config).await?; let vec_column_desc = conf.map_to_columns()?; let mut vec_column_catalog = vec_column_desc @@ -163,11 +156,7 @@ async fn extract_debezium_avro_table_pk_columns( info: &StreamSourceInfo, with_properties: &HashMap, ) -> Result> { - let parser_config = SpecificParserConfig::new( - SourceStruct::new(SourceFormat::Debezium, SourceEncode::Avro), - info, - with_properties, - )?; + let parser_config = SpecificParserConfig::new(info, with_properties)?; let conf = DebeziumAvroParserConfig::new(parser_config.encoding_config).await?; Ok(conf.extract_pks()?.drain(..).map(|c| c.name).collect()) } @@ -177,11 +166,7 @@ async fn extract_debezium_avro_table_schema( info: &StreamSourceInfo, with_properties: &HashMap, ) -> Result> { - let parser_config = SpecificParserConfig::new( - SourceStruct::new(SourceFormat::Debezium, SourceEncode::Avro), - info, - with_properties, - )?; + let parser_config = SpecificParserConfig::new(info, with_properties)?; let conf = DebeziumAvroParserConfig::new(parser_config.encoding_config).await?; let vec_column_desc = conf.map_to_columns()?; let column_catalog = vec_column_desc @@ -203,13 +188,11 @@ async fn extract_protobuf_table_schema( proto_message_name: schema.message_name.0.clone(), 
row_schema_location: schema.row_schema_location.0.clone(), use_schema_registry: schema.use_schema_registry, + format: FormatType::Plain.into(), + row_encode: EncodeType::Protobuf.into(), ..Default::default() }; - let parser_config = SpecificParserConfig::new( - SourceStruct::new(SourceFormat::Plain, SourceEncode::Protobuf), - &info, - &with_properties, - )?; + let parser_config = SpecificParserConfig::new(&info, &with_properties)?; let conf = ProtobufParserConfig::new(parser_config.encoding_config).await?; let column_descs = conf.map_to_columns()?; @@ -252,7 +235,7 @@ fn consume_string_from_options( )))) } -fn get_json_schema_location( +pub fn get_json_schema_location( row_options: &mut BTreeMap, ) -> Result> { let schema_location = try_consume_string_from_options(row_options, "schema.location"); @@ -892,6 +875,9 @@ static CONNECTORS_COMPATIBLE_FORMATS: LazyLock hashmap!( Format::Plain => vec![Encode::Csv, Encode::Json], ), + S3_V2_CONNECTOR => hashmap!( + Format::Plain => vec![Encode::Csv, Encode::Json], + ), MYSQL_CDC_CONNECTOR => hashmap!( Format::Plain => vec![Encode::Bytes], Format::Debezium => vec![Encode::Json], @@ -907,6 +893,9 @@ static CONNECTORS_COMPATIBLE_FORMATS: LazyLock hashmap!( Format::Plain => vec![Encode::Json], ), + TEST_CONNECTOR => hashmap!( + Format::Plain => vec![Encode::Json], + ) )) }); @@ -921,8 +910,9 @@ pub fn validate_compatibility( .get(&connector) .ok_or_else(|| { RwError::from(ProtocolError(format!( - "connector {} is not supported", - connector + "connector {:?} is not supported, accept {:?}", + connector, + CONNECTORS_COMPATIBLE_FORMATS.keys() ))) })?; if connector != KAFKA_CONNECTOR { @@ -1068,7 +1058,10 @@ pub async fn handle_create_source( ))); } - let source_schema = stmt.source_schema.into_source_schema_v2(); + let (source_schema, notice) = stmt.source_schema.into_source_schema_v2(); + if let Some(notice) = notice { + session.notice_to_user(notice) + }; let mut with_properties = 
handler_args.with_options.into_inner().into_iter().collect(); validate_compatibility(&source_schema, &mut with_properties)?; @@ -1122,10 +1115,10 @@ pub async fn handle_create_source( let columns = columns.into_iter().map(|c| c.to_protobuf()).collect_vec(); - // resolve privatelink connection for Kafka source let mut with_options = WithOptions::new(with_properties); + // resolve privatelink connection for Kafka source let connection_id = - resolve_connection_in_with_option(&mut with_options, &schema_name, &session)?; + resolve_privatelink_in_with_option(&mut with_options, &schema_name, &session)?; let definition = handler_args.normalized_sql; let source = PbSource { diff --git a/src/frontend/src/handler/create_table.rs b/src/frontend/src/handler/create_table.rs index 476e15885c65d..19d9a2f25c4b8 100644 --- a/src/frontend/src/handler/create_table.rs +++ b/src/frontend/src/handler/create_table.rs @@ -25,6 +25,7 @@ use risingwave_common::catalog::{ use risingwave_common::constants::hummock::TABLE_OPTION_DUMMY_RETENTION_SECOND; use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; +use risingwave_common::util::value_encoding::DatumToProtoExt; use risingwave_connector::source::external::ExternalTableType; use risingwave_pb::catalog::source::OptionalAssociatedTableId; use risingwave_pb::catalog::{PbSource, PbTable, StreamSourceInfo, WatermarkDesc}; @@ -40,7 +41,7 @@ use super::RwPgResponse; use crate::binder::{bind_data_type, bind_struct_field, Clause}; use crate::catalog::table_catalog::TableVersion; use crate::catalog::{check_valid_column_name, CatalogError, ColumnId}; -use crate::expr::{Expr, ExprImpl}; +use crate::expr::{Expr, ExprImpl, ExprRewriter, InlineNowProcTime}; use crate::handler::create_source::{ bind_source_watermark, check_source_schema, try_bind_columns_from_source, validate_compatibility, UPSTREAM_SOURCE_KEY, @@ -51,7 +52,7 @@ use crate::optimizer::property::{Order, RequiredDist}; 
use crate::optimizer::{OptimizerContext, OptimizerContextRef, PlanRef, PlanRoot}; use crate::session::{CheckRelationError, SessionImpl}; use crate::stream_fragmenter::build_graph; -use crate::utils::resolve_connection_in_with_option; +use crate::utils::resolve_privatelink_in_with_option; use crate::{Binder, TableCatalog, WithOptions}; /// Column ID generator for a new table or a new version of an existing table to alter. @@ -210,44 +211,29 @@ fn check_generated_column_constraints( .iter() .any(|c| c == referred_generated_column) { - return Err(ErrorCode::BindError( - format!("Generated can not reference another generated column, but here generated column \"{}\" referenced another generated column \"{}\"", column_name, referred_generated_column), - ) + return Err(ErrorCode::BindError(format!( + "Generated can not reference another generated column. \ + But here generated column \"{}\" referenced another generated column \"{}\"", + column_name, referred_generated_column + )) .into()); } } if pk_column_ids.contains(&column_id) && expr.is_impure() { - return Err(ErrorCode::BindError( - format!("Generated columns should not be part of the primary key. Here column \"{}\" is defined as part of the primary key.", column_name), - ) + return Err(ErrorCode::BindError(format!( + "Generated columns with impure expressions should not be part of the primary key. \ + Here column \"{}\" is defined as part of the primary key.", + column_name + )) .into()); } Ok(()) } -fn check_default_column_constraints( - expr: &ExprImpl, - column_catalogs: &[ColumnCatalog], -) -> Result<()> { - let input_refs = expr.collect_input_refs(column_catalogs.len()); - if input_refs.count_ones(..) > 0 { - return Err(ErrorCode::BindError( - "Default can not reference another column, and you should try generated column instead." 
- .to_string(), - ) - .into()); - } - if expr.is_impure() { - return Err( - ErrorCode::BindError("impure default expr is not supported.".to_string()).into(), - ); - } - Ok(()) -} - -/// Binds constraints that can be only specified in column definitions. +/// Binds constraints that can be only specified in column definitions, +/// currently generated columns and default columns. pub fn bind_sql_column_constraints( session: &SessionImpl, table_name: String, @@ -303,12 +289,34 @@ pub fn bind_sql_column_constraints( .bind_expr(expr)? .cast_assign(column_catalogs[idx].data_type().clone())?; - check_default_column_constraints(&expr_impl, column_catalogs)?; - - column_catalogs[idx].column_desc.generated_or_default_column = - Some(GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { - expr: Some(expr_impl.to_expr_proto()), - })); + // Rewrite expressions to evaluate a snapshot value, used for missing values in the case of + // schema change. + // + // TODO: Currently we don't support impure expressions other than `now()` (like `random()`), + // so the rewritten expression should almost always be pure and we directly call `fold_const` + // here. Actually we do not require purity of the expression here since we're only to get a + // snapshot value. + let rewritten_expr_impl = + InlineNowProcTime::new(session.pinned_snapshot().epoch()) + .rewrite_expr(expr_impl.clone()); + + if let Some(snapshot_value) = rewritten_expr_impl.try_fold_const() { + let snapshot_value = snapshot_value?; + + column_catalogs[idx].column_desc.generated_or_default_column = Some( + GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { + snapshot_value: Some(snapshot_value.to_protobuf()), + expr: Some(expr_impl.to_expr_proto()), /* persist the original expression */ + }), + ); + } else { + return Err(ErrorCode::BindError(format!( + "Default expression used in column `{}` cannot be evaluated. 
\ + Use generated columns instead if you mean to reference other columns.", + column.name + )) + .into()); + } } _ => {} } @@ -653,7 +661,7 @@ fn gen_table_plan_inner( // resolve privatelink connection for Table backed by Kafka source let mut with_options = WithOptions::new(properties); let connection_id = - resolve_connection_in_with_option(&mut with_options, &schema_name, &session)?; + resolve_privatelink_in_with_option(&mut with_options, &schema_name, &session)?; let source = source_info.map(|source_info| PbSource { id: TableId::placeholder().table_id, diff --git a/src/frontend/src/handler/describe.rs b/src/frontend/src/handler/describe.rs index f2fb89d02a7dc..4100b9a20be02 100644 --- a/src/frontend/src/handler/describe.rs +++ b/src/frontend/src/handler/describe.rs @@ -18,7 +18,7 @@ use itertools::Itertools; use pgwire::pg_field_descriptor::PgFieldDescriptor; use pgwire::pg_response::{PgResponse, StatementType}; use pgwire::types::Row; -use risingwave_common::catalog::ColumnDesc; +use risingwave_common::catalog::{ColumnCatalog, ColumnDesc}; use risingwave_common::error::Result; use risingwave_common::types::DataType; use risingwave_sqlparser::ast::{display_comma_separated, ObjectName}; @@ -34,65 +34,66 @@ pub fn handle_describe(handler_args: HandlerArgs, table_name: ObjectName) -> Res let mut binder = Binder::new_for_system(&session); let relation = binder.bind_relation_by_name(table_name.clone(), None, false)?; // For Source, it doesn't have table catalog so use get source to get column descs. 
- let (columns, pk_columns, indices): (Vec, Vec, Vec>) = { - let (column_catalogs, pk_column_catalogs, indices) = match relation { - Relation::Source(s) => { - let pk_column_catalogs = s - .catalog - .pk_col_ids - .iter() - .map(|&column_id| { - s.catalog - .columns - .iter() - .filter(|x| x.column_id() == column_id) - .exactly_one() - .unwrap() - .clone() - }) - .collect_vec(); - (s.catalog.columns, pk_column_catalogs, vec![]) - } - Relation::BaseTable(t) => { - let pk_column_catalogs = t - .table_catalog - .pk() - .iter() - .map(|x| t.table_catalog.columns[x.column_index].clone()) - .collect_vec(); - (t.table_catalog.columns, pk_column_catalogs, t.table_indexes) - } - Relation::SystemTable(t) => { - let pk_column_catalogs = t - .sys_table_catalog - .pk - .iter() - .map(|idx| t.sys_table_catalog.columns[*idx].clone()) - .collect_vec(); - ( - t.sys_table_catalog.columns.clone(), - pk_column_catalogs, - vec![], - ) - } - _ => { - return Err( - CatalogError::NotFound("table or source", table_name.to_string()).into(), - ); - } - }; - ( - column_catalogs - .into_iter() - .filter(|c| !c.is_hidden) - .map(|c| c.column_desc) - .collect(), - pk_column_catalogs - .into_iter() - .map(|c| c.column_desc) - .collect(), - indices, - ) + let (columns, pk_columns, dist_columns, indices): ( + Vec, + Vec, + Vec, + Vec>, + ) = match relation { + Relation::Source(s) => { + let pk_column_catalogs = s + .catalog + .pk_col_ids + .iter() + .map(|&column_id| { + s.catalog + .columns + .iter() + .filter(|x| x.column_id() == column_id) + .map(|x| x.column_desc.clone()) + .exactly_one() + .unwrap() + }) + .collect_vec(); + (s.catalog.columns, pk_column_catalogs, vec![], vec![]) + } + Relation::BaseTable(t) => { + let pk_column_catalogs = t + .table_catalog + .pk() + .iter() + .map(|x| t.table_catalog.columns[x.column_index].column_desc.clone()) + .collect_vec(); + let dist_columns = t + .table_catalog + .distribution_key() + .iter() + .map(|idx| 
t.table_catalog.columns[*idx].column_desc.clone()) + .collect_vec(); + ( + t.table_catalog.columns, + pk_column_catalogs, + dist_columns, + t.table_indexes, + ) + } + Relation::SystemTable(t) => { + let pk_column_catalogs = t + .sys_table_catalog + .pk + .iter() + .map(|idx| t.sys_table_catalog.columns[*idx].column_desc.clone()) + .collect_vec(); + ( + t.sys_table_catalog.columns.clone(), + pk_column_catalogs, + vec![], + vec![], + ) + } + _ => { + return Err(CatalogError::NotFound("table or source", table_name.to_string()).into()); + } }; // Convert all column descs to rows @@ -109,6 +110,22 @@ pub fn handle_describe(handler_args: HandlerArgs, table_name: ObjectName) -> Res ) .into(), ), + None, + ])); + } + + // Convert distribution keys to rows + if !dist_columns.is_empty() { + rows.push(Row::new(vec![ + Some("distribution key".into()), + Some( + display_comma_separated( + &dist_columns.into_iter().map(|col| col.name).collect_vec(), + ) + .to_string() + .into(), + ), + None, ])); } @@ -138,6 +155,7 @@ pub fn handle_describe(handler_args: HandlerArgs, table_name: ObjectName) -> Res .into(), ) }, + None, ]) })); @@ -156,6 +174,11 @@ pub fn handle_describe(handler_args: HandlerArgs, table_name: ObjectName) -> Res DataType::Varchar.to_oid(), DataType::Varchar.type_len(), ), + PgFieldDescriptor::new( + "Is Hidden".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), ], ) .into()) @@ -208,6 +231,7 @@ mod tests { "v3".into() => "integer".into(), "v4".into() => "integer".into(), "primary key".into() => "v3".into(), + "distribution key".into() => "v3".into(), "idx1".into() => "index(v1 DESC, v2 ASC, v3 ASC) include(v4) distributed by(v1)".into(), }; diff --git a/src/frontend/src/handler/drop_sink.rs b/src/frontend/src/handler/drop_sink.rs index 698791dceed6b..6b1a864e03964 100644 --- a/src/frontend/src/handler/drop_sink.rs +++ b/src/frontend/src/handler/drop_sink.rs @@ -72,7 +72,7 @@ mod tests { async fn test_drop_sink_handler() { let 
sql_create_table = "create table t (v1 smallint primary key);"; let sql_create_mv = "create materialized view mv as select v1 from t;"; - let sql_create_sink = "create sink snk from mv with( connector = 'mysql')"; + let sql_create_sink = "create sink snk from mv with( connector = 'kafka')"; let sql_drop_sink = "drop sink snk;"; let frontend = LocalFrontend::new(Default::default()).await; frontend.run_sql(sql_create_table).await.unwrap(); diff --git a/src/frontend/src/handler/explain.rs b/src/frontend/src/handler/explain.rs index b31aa33aff8d0..5de7ec95b38bd 100644 --- a/src/frontend/src/handler/explain.rs +++ b/src/frontend/src/handler/explain.rs @@ -63,10 +63,13 @@ async fn do_handle_explain( .. } => { // TODO(st1page): refacor it - let notice: Option = Default::default(); - - let source_schema = - source_schema.map(|source_schema| source_schema.into_source_schema_v2()); + let (source_schema, notice) = match source_schema { + Some(s) => { + let (s, notice) = s.into_source_schema_v2(); + (Some(s), notice) + } + None => (None, None), + }; let with_options = context.with_options(); let plan = match check_create_table_with_source(with_options, source_schema)? { Some(s) => { diff --git a/src/frontend/src/handler/mod.rs b/src/frontend/src/handler/mod.rs index 4821a12455e04..174ed23e03ec5 100644 --- a/src/frontend/src/handler/mod.rs +++ b/src/frontend/src/handler/mod.rs @@ -73,6 +73,7 @@ mod show; mod transaction; pub mod util; pub mod variable; +mod wait; /// The [`PgResponseBuilder`] used by RisingWave. 
pub type RwPgResponseBuilder = PgResponseBuilder; @@ -248,11 +249,13 @@ pub async fn handle( ) .await; } - // TODO(st1page): refacor it - let notice = Default::default(); - let source_schema = - source_schema.map(|source_schema| source_schema.into_source_schema_v2()); - + let (source_schema, notice) = match source_schema { + Some(s) => { + let (s, notice) = s.into_source_schema_v2(); + (Some(s), notice) + } + None => (None, None), + }; create_table::handle_create_table( handler_args, name, @@ -417,6 +420,7 @@ pub async fn handle( } } Statement::Flush => flush::handle_flush(handler_args).await, + Statement::Wait => wait::handle_wait(handler_args).await, Statement::SetVariable { local: _, variable, diff --git a/src/frontend/src/handler/query.rs b/src/frontend/src/handler/query.rs index 79289071dd889..e11562bccb467 100644 --- a/src/frontend/src/handler/query.rs +++ b/src/frontend/src/handler/query.rs @@ -464,18 +464,12 @@ async fn distribute_execute( #[expect(clippy::unused_async)] async fn local_execute(session: Arc, query: Query) -> Result { let front_env = session.env(); + // TODO: if there's no table scan, we don't need to acquire snapshot. 
let snapshot = session.pinned_snapshot(); // TODO: Passing sql here - let execution = LocalQueryExecution::new( - query, - front_env.clone(), - "", - snapshot, - session.auth_context(), - session.reset_cancel_query_flag(), - ); + let execution = LocalQueryExecution::new(query, front_env.clone(), "", snapshot, session); Ok(execution.stream_rows()) } diff --git a/src/frontend/src/handler/show.rs b/src/frontend/src/handler/show.rs index 9ad9e905faa27..88a9b1e694e33 100644 --- a/src/frontend/src/handler/show.rs +++ b/src/frontend/src/handler/show.rs @@ -18,12 +18,12 @@ use itertools::Itertools; use pgwire::pg_field_descriptor::PgFieldDescriptor; use pgwire::pg_response::{PgResponse, StatementType}; use pgwire::types::Row; -use risingwave_common::catalog::{ColumnDesc, DEFAULT_SCHEMA_NAME}; +use risingwave_common::catalog::{ColumnCatalog, DEFAULT_SCHEMA_NAME}; use risingwave_common::error::{ErrorCode, Result}; use risingwave_common::types::DataType; use risingwave_common::util::addr::HostAddr; use risingwave_connector::source::kafka::PRIVATELINK_CONNECTION; -use risingwave_expr::vector_op::like::{i_like_default, like_default}; +use risingwave_expr::scalar::like::{i_like_default, like_default}; use risingwave_pb::catalog::connection; use risingwave_sqlparser::ast::{ Ident, ObjectName, ShowCreateType, ShowObject, ShowStatementFilter, @@ -36,14 +36,15 @@ use crate::catalog::{CatalogError, IndexCatalog}; use crate::handler::util::{col_descs_to_rows, indexes_to_rows}; use crate::handler::HandlerArgs; use crate::session::SessionImpl; +use crate::utils::infer_stmt_row_desc::infer_show_object; pub fn get_columns_from_table( session: &SessionImpl, table_name: ObjectName, -) -> Result> { +) -> Result> { let mut binder = Binder::new_for_system(session); let relation = binder.bind_relation_by_name(table_name.clone(), None, false)?; - let catalogs = match relation { + let column_catalogs = match relation { Relation::Source(s) => s.catalog.columns, Relation::BaseTable(t) => 
t.table_catalog.columns, Relation::SystemTable(t) => t.sys_table_catalog.columns.clone(), @@ -52,11 +53,7 @@ pub fn get_columns_from_table( } }; - Ok(catalogs - .into_iter() - .filter(|c| !c.is_hidden) - .map(|c| c.column_desc) - .collect()) + Ok(column_catalogs) } pub fn get_indexes_from_table( @@ -95,6 +92,7 @@ pub async fn handle_show_object( ) .into()); } + let row_desc = infer_show_object(&command); let catalog_reader = session.env().catalog_reader(); @@ -146,21 +144,7 @@ pub async fn handle_show_object( let rows = col_descs_to_rows(columns); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Type".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } ShowObject::Indexes { table } => { @@ -168,36 +152,7 @@ pub async fn handle_show_object( let rows = indexes_to_rows(indexes); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "On".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Key".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Include".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Distributed By".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } ShowObject::Connection { schema } => { @@ -246,26 +201,7 @@ pub async fn handle_show_object( }) .collect_vec(); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - 
PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Type".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Properties".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } ShowObject::Function { schema } => { @@ -284,36 +220,7 @@ pub async fn handle_show_object( }) .collect_vec(); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Arguments".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Return Type".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Language".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Link".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } ShowObject::Cluster => { @@ -341,41 +248,7 @@ pub async fn handle_show_object( }) .collect_vec(); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - PgFieldDescriptor::new( - "Addr".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "State".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Parallel Units".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Is Streaming".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Is Serving".to_owned(), - DataType::Varchar.to_oid(), - 
DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Is Unschedulable".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } ShowObject::Jobs => { @@ -391,26 +264,7 @@ pub async fn handle_show_object( }) .collect_vec(); return Ok(PgResponse::builder(StatementType::SHOW_COMMAND) - .values( - rows.into(), - vec![ - PgFieldDescriptor::new( - "Id".to_owned(), - DataType::Int64.to_oid(), - DataType::Int64.type_len(), - ), - PgFieldDescriptor::new( - "Statement".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Progress".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ], - ) + .values(rows.into(), row_desc) .into()); } }; @@ -575,14 +429,16 @@ mod tests { let expected_columns: HashMap = maplit::hashmap! { "id".into() => "integer".into(), - "country.zipcode".into() => "varchar".into(), + "country.zipcode".into() => "character varying".into(), "zipcode".into() => "bigint".into(), - "country.city.address".into() => "varchar".into(), - "country.address".into() => "varchar".into(), + "country.city.address".into() => "character varying".into(), + "country.address".into() => "character varying".into(), "country.city".into() => "test.City".into(), - "country.city.zipcode".into() => "varchar".into(), + "country.city.zipcode".into() => "character varying".into(), "rate".into() => "real".into(), "country".into() => "test.Country".into(), + "_rw_kafka_timestamp".into() => "timestamp with time zone".into(), + "_row_id".into() => "serial".into(), }; assert_eq!(columns, expected_columns); diff --git a/src/frontend/src/handler/util.rs b/src/frontend/src/handler/util.rs index 05bab2ea5404a..66494be928d42 100644 --- a/src/frontend/src/handler/util.rs +++ b/src/frontend/src/handler/util.rs @@ -26,13 +26,12 @@ use pgwire::pg_server::BoxedError; use pgwire::types::{Format, FormatIterator, Row}; use 
pin_project_lite::pin_project; use risingwave_common::array::DataChunk; -use risingwave_common::catalog::{ColumnDesc, Field}; +use risingwave_common::catalog::{ColumnCatalog, Field}; use risingwave_common::error::{ErrorCode, Result as RwResult}; use risingwave_common::row::Row as _; -use risingwave_common::types::{DataType, ScalarRefImpl}; +use risingwave_common::types::{DataType, ScalarRefImpl, Timestamptz}; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_connector::source::KAFKA_CONNECTOR; -use risingwave_expr::vector_op::timestamptz::timestamptz_to_string; use risingwave_sqlparser::ast::display_comma_separated; use crate::catalog::IndexCatalog; @@ -134,14 +133,13 @@ fn timestamptz_to_string_with_session_data( d: ScalarRefImpl<'_>, session_data: &StaticSessionData, ) -> Bytes { - let mut buf = String::new(); - match d { - ScalarRefImpl::<'_>::Timestamptz(tz) => { - timestamptz_to_string(tz, &session_data.timezone, &mut buf).unwrap() - } - _ => panic!("expect timestamptz"), - }; - buf.into() + let tz = d.into_timestamptz(); + let time_zone = Timestamptz::lookup_time_zone(&session_data.timezone).unwrap(); + let instant_local = tz.to_datetime_in_zone(time_zone); + instant_local + .format("%Y-%m-%d %H:%M:%S%.f%:z") + .to_string() + .into() } fn to_pg_rows( @@ -172,11 +170,12 @@ fn to_pg_rows( } /// Convert column descs to rows which conclude name and type -pub fn col_descs_to_rows(columns: Vec) -> Vec { +pub fn col_descs_to_rows(columns: Vec) -> Vec { columns .iter() .flat_map(|col| { - col.flatten() + col.column_desc + .flatten() .into_iter() .map(|c| { let type_name = if let DataType::Struct { .. 
} = c.data_type { @@ -184,7 +183,11 @@ pub fn col_descs_to_rows(columns: Vec) -> Vec { } else { c.data_type.to_string() }; - Row::new(vec![Some(c.name.into()), Some(type_name.into())]) + Row::new(vec![ + Some(c.name.into()), + Some(type_name.into()), + Some(col.is_hidden.to_string().into()), + ]) }) .collect_vec() }) @@ -256,6 +259,7 @@ pub fn get_connection_name(with_properties: &BTreeMap) -> Option .get(CONNECTION_NAME_KEY) .map(|s| s.to_lowercase()) } + #[cfg(test)] mod tests { use bytes::BytesMut; diff --git a/src/storage/backup/cmd/src/bin/backup_restore.rs b/src/frontend/src/handler/wait.rs similarity index 53% rename from src/storage/backup/cmd/src/bin/backup_restore.rs rename to src/frontend/src/handler/wait.rs index 39d664e2247ee..83f2784ec8c17 100644 --- a/src/storage/backup/cmd/src/bin/backup_restore.rs +++ b/src/frontend/src/handler/wait.rs @@ -12,18 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +use pgwire::pg_response::{PgResponse, StatementType}; +use risingwave_common::error::Result; -use risingwave_backup::error::BackupResult; +use super::RwPgResponse; +use crate::handler::HandlerArgs; +use crate::session::SessionImpl; -#[cfg_attr(coverage, no_coverage)] -fn main() -> BackupResult<()> { - use clap::Parser; - let opts = risingwave_meta::backup_restore::RestoreOpts::parse(); - risingwave_rt::init_risingwave_logger(risingwave_rt::LoggerSettings::new("backup_restore")); - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap() - .block_on(risingwave_meta::backup_restore::restore(opts)) +pub(super) async fn handle_wait(handler_args: HandlerArgs) -> Result { + do_wait(&handler_args.session).await?; + Ok(PgResponse::empty_result(StatementType::WAIT)) +} + +pub(crate) async fn do_wait(session: &SessionImpl) -> Result<()> { + let client = session.env().meta_client(); + client.wait().await?; + Ok(()) } diff --git 
a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs index 2d1aaea26b3c0..450f49b6394cf 100644 --- a/src/frontend/src/lib.rs +++ b/src/frontend/src/lib.rs @@ -15,7 +15,7 @@ #![allow(clippy::derive_partial_eq_without_eq)] #![feature(map_try_insert)] #![feature(negative_impls)] -#![feature(generators)] +#![feature(coroutines)] #![feature(proc_macro_hygiene, stmt_expr_attributes)] #![feature(trait_alias)] #![feature(extract_if)] @@ -32,9 +32,12 @@ #![feature(extend_one)] #![feature(type_alias_impl_trait)] #![feature(impl_trait_in_assoc_type)] -#![feature(async_fn_in_trait)] +#![feature(result_flattening)] #![recursion_limit = "256"] +#[cfg(test)] +risingwave_expr_impl::enable!(); + #[macro_use] mod catalog; pub use catalog::TableCatalog; diff --git a/src/frontend/src/meta_client.rs b/src/frontend/src/meta_client.rs index 0ab4fe6d5993b..d37c5dec127f1 100644 --- a/src/frontend/src/meta_client.rs +++ b/src/frontend/src/meta_client.rs @@ -18,7 +18,10 @@ use risingwave_common::system_param::reader::SystemParamsReader; use risingwave_pb::backup_service::MetaSnapshotMetadata; use risingwave_pb::catalog::Table; use risingwave_pb::ddl_service::DdlProgress; -use risingwave_pb::hummock::HummockSnapshot; +use risingwave_pb::hummock::write_limits::WriteLimit; +use risingwave_pb::hummock::{ + BranchedObject, CompactionGroupInfo, HummockSnapshot, HummockVersion, HummockVersionDelta, +}; use risingwave_pb::meta::cancel_creating_jobs_request::PbJobs; use risingwave_pb::meta::list_actor_states_response::ActorState; use risingwave_pb::meta::list_fragment_distribution_response::FragmentDistribution; @@ -40,6 +43,8 @@ pub trait FrontendMetaClient: Send + Sync { async fn flush(&self, checkpoint: bool) -> Result; + async fn wait(&self) -> Result<()>; + async fn cancel_creating_jobs(&self, jobs: PbJobs) -> Result>; async fn list_table_fragments( @@ -70,6 +75,26 @@ pub trait FrontendMetaClient: Send + Sync { async fn list_ddl_progress(&self) -> Result>; async fn get_tables(&self, table_ids: 
&[u32]) -> Result>; + + /// Returns vector of (worker_id, min_pinned_version_id) + async fn list_hummock_pinned_versions(&self) -> Result>; + + /// Returns vector of (worker_id, min_pinned_snapshot_id) + async fn list_hummock_pinned_snapshots(&self) -> Result>; + + async fn get_hummock_current_version(&self) -> Result; + + async fn get_hummock_checkpoint_version(&self) -> Result; + + async fn list_version_deltas(&self) -> Result>; + + async fn list_branched_objects(&self) -> Result>; + + async fn list_hummock_compaction_group_configs(&self) -> Result>; + + async fn list_hummock_active_write_limits(&self) -> Result>; + + async fn list_hummock_meta_configs(&self) -> Result>; } pub struct FrontendMetaClientImpl(pub MetaClient); @@ -88,6 +113,10 @@ impl FrontendMetaClient for FrontendMetaClientImpl { self.0.flush(checkpoint).await } + async fn wait(&self) -> Result<()> { + self.0.wait().await + } + async fn cancel_creating_jobs(&self, infos: PbJobs) -> Result> { self.0.cancel_creating_jobs(infos).await } @@ -145,4 +174,69 @@ impl FrontendMetaClient for FrontendMetaClientImpl { let tables = self.0.get_tables(table_ids).await?; Ok(tables) } + + async fn list_hummock_pinned_versions(&self) -> Result> { + let pinned_versions = self + .0 + .risectl_get_pinned_versions_summary() + .await? + .summary + .unwrap() + .pinned_versions; + let ret = pinned_versions + .into_iter() + .map(|v| (v.context_id, v.min_pinned_id)) + .collect(); + Ok(ret) + } + + async fn list_hummock_pinned_snapshots(&self) -> Result> { + let pinned_snapshots = self + .0 + .risectl_get_pinned_snapshots_summary() + .await? 
+ .summary + .unwrap() + .pinned_snapshots; + let ret = pinned_snapshots + .into_iter() + .map(|s| (s.context_id, s.minimal_pinned_snapshot)) + .collect(); + Ok(ret) + } + + async fn get_hummock_current_version(&self) -> Result { + self.0.get_current_version().await + } + + async fn get_hummock_checkpoint_version(&self) -> Result { + self.0 + .risectl_get_checkpoint_hummock_version() + .await + .map(|v| v.checkpoint_version.unwrap()) + } + + async fn list_version_deltas(&self) -> Result> { + // FIXME #8612: there can be lots of version deltas, so better to fetch them by pages and refactor `SysRowSeqScanExecutor` to yield multiple chunks. + self.0 + .list_version_deltas(0, u32::MAX, u64::MAX) + .await + .map(|v| v.version_deltas) + } + + async fn list_branched_objects(&self) -> Result> { + self.0.list_branched_object().await + } + + async fn list_hummock_compaction_group_configs(&self) -> Result> { + self.0.risectl_list_compaction_group().await + } + + async fn list_hummock_active_write_limits(&self) -> Result> { + self.0.list_active_write_limit().await + } + + async fn list_hummock_meta_configs(&self) -> Result> { + self.0.list_hummock_meta_config().await + } } diff --git a/src/frontend/src/optimizer/logical_optimization.rs b/src/frontend/src/optimizer/logical_optimization.rs index 4d958d21ec044..b2047d7cae089 100644 --- a/src/frontend/src/optimizer/logical_optimization.rs +++ b/src/frontend/src/optimizer/logical_optimization.rs @@ -189,6 +189,7 @@ static GENERAL_UNNESTING_PUSH_DOWN_APPLY: LazyLock = LazyLock ApplyUnionTransposeRule::create(), ApplyOverWindowTransposeRule::create(), ApplyExpandTransposeRule::create(), + ApplyHopWindowTransposeRule::create(), CrossJoinEliminateRule::create(), ApplyShareEliminateRule::create(), ], @@ -255,6 +256,14 @@ static CONVERT_DISTINCT_AGG_FOR_BATCH: LazyLock = LazyLock::n ) }); +static SIMPLIFY_AGG: LazyLock = LazyLock::new(|| { + OptimizationStage::new( + "Simplify Aggregation", + vec![AggGroupBySimplifyRule::create(), 
AggCallMergeRule::create()], + ApplyOrder::TopDown, + ) +}); + static JOIN_COMMUTE: LazyLock = LazyLock::new(|| { OptimizationStage::new( "Join Commute".to_string(), @@ -558,6 +567,8 @@ impl LogicalOptimizer { plan.optimize_by_rules(&CONVERT_DISTINCT_AGG_FOR_STREAM) }; + plan = plan.optimize_by_rules(&SIMPLIFY_AGG); + plan = plan.optimize_by_rules(&JOIN_COMMUTE); // Do a final column pruning and predicate pushing down to clean up the plan. @@ -630,6 +641,8 @@ impl LogicalOptimizer { // Convert distinct aggregates. plan = plan.optimize_by_rules(&CONVERT_DISTINCT_AGG_FOR_BATCH); + plan = plan.optimize_by_rules(&SIMPLIFY_AGG); + plan = plan.optimize_by_rules(&JOIN_COMMUTE); // Do a final column pruning and predicate pushing down to clean up the plan. diff --git a/src/frontend/src/optimizer/mod.rs b/src/frontend/src/optimizer/mod.rs index 8daa0e7a45b31..0df387b0a53d5 100644 --- a/src/frontend/src/optimizer/mod.rs +++ b/src/frontend/src/optimizer/mod.rs @@ -42,6 +42,7 @@ use risingwave_common::catalog::{ColumnCatalog, ColumnId, ConflictBehavior, Fiel use risingwave_common::error::{ErrorCode, Result}; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_common::util::iter_util::ZipEqDebug; +use risingwave_connector::sink::catalog::SinkFormatDesc; use risingwave_pb::catalog::WatermarkDesc; use self::heuristic_optimizer::ApplyOrder; @@ -56,7 +57,6 @@ use self::plan_visitor::{has_batch_exchange, CardinalityVisitor}; use self::property::{Cardinality, RequiredDist}; use self::rule::*; use crate::catalog::table_catalog::{TableType, TableVersion}; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::{ BatchExchange, PlanNodeType, PlanTreeNode, RewriteExprsRecursive, }; @@ -448,6 +448,7 @@ impl PlanRoot { let exprs = LogicalSource::derive_output_exprs_from_generated_columns(&columns)?; if let Some(exprs) = exprs { let logical_project = generic::Project::new(exprs, stream_plan); + // The project node merges a 
chunk if it has an ungenerated row id as stream key. stream_plan = StreamProject::new(logical_project).into(); } @@ -556,6 +557,7 @@ impl PlanRoot { emit_on_window_close: bool, db_name: String, sink_from_table_name: String, + format_desc: Option, ) -> Result { let stream_plan = self.gen_optimized_stream_plan(emit_on_window_close)?; @@ -570,6 +572,7 @@ impl PlanRoot { self.out_names.clone(), definition, properties, + format_desc, ) } @@ -613,7 +616,7 @@ fn exist_and_no_exchange_before(plan: &PlanRef, is_candidate: fn(&PlanRef) -> bo fn require_additional_exchange_on_root_in_distributed_mode(plan: PlanRef) -> bool { fn is_user_table(plan: &PlanRef) -> bool { plan.as_batch_seq_scan() - .map(|node| !node.logical().is_sys_table) + .map(|node| !node.core().is_sys_table) .unwrap_or(false) } @@ -646,7 +649,7 @@ fn require_additional_exchange_on_root_in_distributed_mode(plan: PlanRef) -> boo fn require_additional_exchange_on_root_in_local_mode(plan: PlanRef) -> bool { fn is_user_table(plan: &PlanRef) -> bool { plan.as_batch_seq_scan() - .map(|node| !node.logical().is_sys_table) + .map(|node| !node.core().is_sys_table) .unwrap_or(false) } diff --git a/src/frontend/src/optimizer/plan_expr_visitor/expr_counter.rs b/src/frontend/src/optimizer/plan_expr_visitor/expr_counter.rs index 3bddc479910c2..c664016b779da 100644 --- a/src/frontend/src/optimizer/plan_expr_visitor/expr_counter.rs +++ b/src/frontend/src/optimizer/plan_expr_visitor/expr_counter.rs @@ -23,7 +23,9 @@ pub struct CseExprCounter { pub counter: HashMap, } -impl ExprVisitor<()> for CseExprCounter { +impl ExprVisitor for CseExprCounter { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_expr(&mut self, expr: &ExprImpl) { diff --git a/src/frontend/src/optimizer/plan_expr_visitor/input_ref_counter.rs b/src/frontend/src/optimizer/plan_expr_visitor/input_ref_counter.rs index 45cdeaab55a78..382dc74222c9f 100644 --- a/src/frontend/src/optimizer/plan_expr_visitor/input_ref_counter.rs +++ 
b/src/frontend/src/optimizer/plan_expr_visitor/input_ref_counter.rs @@ -22,7 +22,9 @@ pub struct InputRefCounter { pub counter: HashMap, } -impl ExprVisitor<()> for InputRefCounter { +impl ExprVisitor for InputRefCounter { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_input_ref(&mut self, input_ref: &InputRef) { diff --git a/src/frontend/src/optimizer/plan_node/batch.rs b/src/frontend/src/optimizer/plan_node/batch.rs index 2ac1e278f7d8b..2cb2360b3e51d 100644 --- a/src/frontend/src/optimizer/plan_node/batch.rs +++ b/src/frontend/src/optimizer/plan_node/batch.rs @@ -12,9 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::generic::GenericPlanRef; +use super::generic::PhysicalPlanRef; use crate::optimizer::property::Order; -pub trait BatchPlanRef: GenericPlanRef { +/// A subtrait of [`PhysicalPlanRef`] for batch plans. +/// +/// Due to the lack of refactoring, all plan nodes currently implement this trait +/// through [`super::PlanBase`]. One may still use this trait as a bound for +/// accessing a batch plan, in contrast to [`GenericPlanRef`] or +/// [`PhysicalPlanRef`]. 
+/// +/// [`GenericPlanRef`]: super::generic::GenericPlanRef +pub trait BatchPlanRef: PhysicalPlanRef { fn order(&self) -> &Order; } diff --git a/src/frontend/src/optimizer/plan_node/batch_delete.rs b/src/frontend/src/optimizer/plan_node/batch_delete.rs index 600ec6827e3eb..85d22a46b450e 100644 --- a/src/frontend/src/optimizer/plan_node/batch_delete.rs +++ b/src/frontend/src/optimizer/plan_node/batch_delete.rs @@ -27,35 +27,32 @@ use crate::optimizer::property::{Distribution, Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchDelete { pub base: PlanBase, - pub logical: generic::Delete, + pub core: generic::Delete, } impl BatchDelete { - pub fn new(logical: generic::Delete) -> Self { - assert_eq!(logical.input.distribution(), &Distribution::Single); - let base: PlanBase = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), - Order::any(), - ); - Self { base, logical } + pub fn new(core: generic::Delete) -> Self { + assert_eq!(core.input.distribution(), &Distribution::Single); + let base: PlanBase = + PlanBase::new_batch_with_core(&core, core.input.distribution().clone(), Order::any()); + Self { base, core } } } impl PlanTreeNodeUnary for BatchDelete { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut core = self.logical.clone(); + let mut core = self.core.clone(); core.input = input; Self::new(core) } } impl_plan_tree_node_for_unary! 
{ BatchDelete } -impl_distill_by_unit!(BatchDelete, logical, "BatchDelete"); +impl_distill_by_unit!(BatchDelete, core, "BatchDelete"); impl ToDistributedBatch for BatchDelete { fn to_distributed(&self) -> Result { @@ -68,9 +65,9 @@ impl ToDistributedBatch for BatchDelete { impl ToBatchPb for BatchDelete { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::Delete(DeleteNode { - table_id: self.logical.table_id.table_id(), - table_version_id: self.logical.table_version_id, - returning: self.logical.returning, + table_id: self.core.table_id.table_id(), + table_version_id: self.core.table_version_id, + returning: self.core.returning, }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_exchange.rs b/src/frontend/src/optimizer/plan_node/batch_exchange.rs index 583838e877c5e..6477c7ec213e2 100644 --- a/src/frontend/src/optimizer/plan_node/batch_exchange.rs +++ b/src/frontend/src/optimizer/plan_node/batch_exchange.rs @@ -17,6 +17,8 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::{ExchangeNode, MergeSortExchangeNode}; +use super::batch::BatchPlanRef; +use super::generic::{GenericPlanRef, PhysicalPlanRef}; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch}; use crate::optimizer::plan_node::ToLocalBatch; @@ -43,12 +45,12 @@ impl Distill for BatchExchange { fn distill<'a>(&self) -> XmlNode<'a> { let input_schema = self.input.schema(); let order = OrderDisplay { - order: &self.base.order, + order: self.base.order(), input_schema, } .distill(); let dist = Pretty::display(&DistributionDisplay { - distribution: &self.base.dist, + distribution: self.base.distribution(), input_schema, }); childless_record("BatchExchange", vec![("order", order), ("dist", dist)]) @@ -75,18 +77,18 @@ impl ToDistributedBatch for BatchExchange { /// The serialization of Batch Exchange is default cuz it will be rewritten in 
scheduler. impl ToBatchPb for BatchExchange { fn to_batch_prost_body(&self) -> NodeBody { - if self.base.order.is_any() { + if self.base.order().is_any() { NodeBody::Exchange(ExchangeNode { sources: vec![], - input_schema: self.base.schema.to_prost(), + input_schema: self.base.schema().to_prost(), }) } else { NodeBody::MergeSortExchange(MergeSortExchangeNode { exchange: Some(ExchangeNode { sources: vec![], - input_schema: self.base.schema.to_prost(), + input_schema: self.base.schema().to_prost(), }), - column_orders: self.base.order.to_protobuf(), + column_orders: self.base.order().to_protobuf(), }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_expand.rs b/src/frontend/src/optimizer/plan_node/batch_expand.rs index 72caa27858be1..af4413b9e5152 100644 --- a/src/frontend/src/optimizer/plan_node/batch_expand.rs +++ b/src/frontend/src/optimizer/plan_node/batch_expand.rs @@ -29,38 +29,38 @@ use crate::optimizer::PlanRef; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchExpand { pub base: PlanBase, - logical: generic::Expand, + core: generic::Expand, } impl BatchExpand { - pub fn new(logical: generic::Expand) -> Self { - let dist = match logical.input.distribution() { + pub fn new(core: generic::Expand) -> Self { + let dist = match core.input.distribution() { Distribution::Single => Distribution::Single, Distribution::SomeShard | Distribution::HashShard(_) | Distribution::UpstreamHashShard(_, _) => Distribution::SomeShard, Distribution::Broadcast => unreachable!(), }; - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); - BatchExpand { base, logical } + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); + BatchExpand { base, core } } pub fn column_subsets(&self) -> &[Vec] { - &self.logical.column_subsets + &self.core.column_subsets } } -impl_distill_by_unit!(BatchExpand, logical, "BatchExpand"); +impl_distill_by_unit!(BatchExpand, core, "BatchExpand"); impl PlanTreeNodeUnary for BatchExpand { fn 
input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_filter.rs b/src/frontend/src/optimizer/plan_node/batch_filter.rs index aadbda9800b16..4bff7cbfee3c0 100644 --- a/src/frontend/src/optimizer/plan_node/batch_filter.rs +++ b/src/frontend/src/optimizer/plan_node/batch_filter.rs @@ -26,35 +26,35 @@ use crate::utils::Condition; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchFilter { pub base: PlanBase, - logical: generic::Filter, + core: generic::Filter, } impl BatchFilter { - pub fn new(logical: generic::Filter) -> Self { + pub fn new(core: generic::Filter) -> Self { // TODO: derive from input - let base = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), - logical.input.order().clone(), + let base = PlanBase::new_batch_with_core( + &core, + core.input.distribution().clone(), + core.input.order().clone(), ); - BatchFilter { base, logical } + BatchFilter { base, core } } pub fn predicate(&self) -> &Condition { - &self.logical.predicate + &self.core.predicate } } -impl_distill_by_unit!(BatchFilter, logical, "BatchFilter"); +impl_distill_by_unit!(BatchFilter, core, "BatchFilter"); impl PlanTreeNodeUnary for BatchFilter { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -70,7 +70,7 @@ impl ToDistributedBatch for BatchFilter { impl ToBatchPb for BatchFilter { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::Filter(FilterNode { - search_condition: 
Some(ExprImpl::from(self.logical.predicate.clone()).to_expr_proto()), + search_condition: Some(ExprImpl::from(self.core.predicate.clone()).to_expr_proto()), }) } } @@ -88,8 +88,8 @@ impl ExprRewritable for BatchFilter { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_group_topn.rs b/src/frontend/src/optimizer/plan_node/batch_group_topn.rs index 1d61b4e9eb379..70ee8328623f5 100644 --- a/src/frontend/src/optimizer/plan_node/batch_group_topn.rs +++ b/src/frontend/src/optimizer/plan_node/batch_group_topn.rs @@ -27,36 +27,33 @@ use crate::optimizer::property::{Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchGroupTopN { pub base: PlanBase, - logical: generic::TopN, + core: generic::TopN, } impl BatchGroupTopN { - pub fn new(logical: generic::TopN) -> Self { - assert!(!logical.group_key.is_empty()); - let base = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), - Order::any(), - ); - BatchGroupTopN { base, logical } + pub fn new(core: generic::TopN) -> Self { + assert!(!core.group_key.is_empty()); + let base = + PlanBase::new_batch_with_core(&core, core.input.distribution().clone(), Order::any()); + BatchGroupTopN { base, core } } fn group_key(&self) -> &[usize] { - &self.logical.group_key + &self.core.group_key } } -impl_distill_by_unit!(BatchGroupTopN, logical, "BatchGroupTopN"); +impl_distill_by_unit!(BatchGroupTopN, core, "BatchGroupTopN"); impl PlanTreeNodeUnary for BatchGroupTopN { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); 
+ core.input = input; + Self::new(core) } } @@ -73,13 +70,13 @@ impl ToDistributedBatch for BatchGroupTopN { impl ToBatchPb for BatchGroupTopN { fn to_batch_prost_body(&self) -> NodeBody { - let column_orders = self.logical.order.to_protobuf(); + let column_orders = self.core.order.to_protobuf(); NodeBody::GroupTopN(GroupTopNNode { - limit: self.logical.limit_attr.limit(), - offset: self.logical.offset, + limit: self.core.limit_attr.limit(), + offset: self.core.offset, column_orders, group_key: self.group_key().iter().map(|c| *c as u32).collect(), - with_ties: self.logical.limit_attr.with_ties(), + with_ties: self.core.limit_attr.with_ties(), }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_hash_agg.rs b/src/frontend/src/optimizer/plan_node/batch_hash_agg.rs index 7100125dcee99..b4ab3341ace29 100644 --- a/src/frontend/src/optimizer/plan_node/batch_hash_agg.rs +++ b/src/frontend/src/optimizer/plan_node/batch_hash_agg.rs @@ -31,27 +31,27 @@ use crate::utils::{ColIndexMappingRewriteExt, IndexSet}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchHashAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, } impl BatchHashAgg { - pub fn new(logical: generic::Agg) -> Self { - assert!(!logical.group_key.is_empty()); - let input = logical.input.clone(); + pub fn new(core: generic::Agg) -> Self { + assert!(!core.group_key.is_empty()); + let input = core.input.clone(); let input_dist = input.distribution(); - let dist = logical + let dist = core .i2o_col_mapping() .rewrite_provided_distribution(input_dist); - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); - BatchHashAgg { base, logical } + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); + BatchHashAgg { base, core } } pub fn agg_calls(&self) -> &[PlanAggCall] { - &self.logical.agg_calls + &self.core.agg_calls } pub fn group_key(&self) -> &IndexSet { - &self.logical.group_key + &self.core.group_key } fn to_two_phase_agg(&self, 
dist_input: PlanRef) -> Result { @@ -68,7 +68,7 @@ impl BatchHashAgg { // insert total agg let total_agg_types = self - .logical + .core .agg_calls .iter() .enumerate() @@ -95,29 +95,27 @@ impl BatchHashAgg { } } -impl_distill_by_unit!(BatchHashAgg, logical, "BatchHashAgg"); +impl_distill_by_unit!(BatchHashAgg, core, "BatchHashAgg"); impl PlanTreeNodeUnary for BatchHashAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! { BatchHashAgg } impl ToDistributedBatch for BatchHashAgg { fn to_distributed(&self) -> Result { - if self.logical.must_try_two_phase_agg() { + if self.core.must_try_two_phase_agg() { let input = self.input().to_distributed()?; let input_dist = input.distribution(); - if !self - .logical - .hash_agg_dist_satisfied_by_input_dist(input_dist) + if !self.core.hash_agg_dist_satisfied_by_input_dist(input_dist) && matches!( input_dist, Distribution::HashShard(_) @@ -162,8 +160,8 @@ impl ExprRewritable for BatchHashAgg { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_hash_join.rs b/src/frontend/src/optimizer/plan_node/batch_hash_join.rs index a4ecf8311a479..bad586d4af1e4 100644 --- a/src/frontend/src/optimizer/plan_node/batch_hash_join.rs +++ b/src/frontend/src/optimizer/plan_node/batch_hash_join.rs @@ -36,7 +36,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchHashJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, /// The join 
condition must be equivalent to `logical.on`, but separated into equal and /// non-equal parts to facilitate execution later @@ -44,17 +44,13 @@ pub struct BatchHashJoin { } impl BatchHashJoin { - pub fn new(logical: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { - let dist = Self::derive_dist( - logical.left.distribution(), - logical.right.distribution(), - &logical, - ); - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); + pub fn new(core: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { + let dist = Self::derive_dist(core.left.distribution(), core.right.distribution(), &core); + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); Self { base, - logical, + core, eq_join_predicate, } } @@ -62,25 +58,21 @@ impl BatchHashJoin { pub(super) fn derive_dist( left: &Distribution, right: &Distribution, - logical: &generic::Join, + join: &generic::Join, ) -> Distribution { match (left, right) { (Distribution::Single, Distribution::Single) => Distribution::Single, // we can not derive the hash distribution from the side where outer join can generate a // NULL row - (Distribution::HashShard(_), Distribution::HashShard(_)) => match logical.join_type { + (Distribution::HashShard(_), Distribution::HashShard(_)) => match join.join_type { JoinType::Unspecified => unreachable!(), JoinType::FullOuter => Distribution::SomeShard, JoinType::Inner | JoinType::LeftOuter | JoinType::LeftSemi | JoinType::LeftAnti => { - let l2o = logical - .l2i_col_mapping() - .composite(&logical.i2o_col_mapping()); + let l2o = join.l2i_col_mapping().composite(&join.i2o_col_mapping()); l2o.rewrite_provided_distribution(left) } JoinType::RightSemi | JoinType::RightAnti | JoinType::RightOuter => { - let r2o = logical - .r2i_col_mapping() - .composite(&logical.i2o_col_mapping()); + let r2o = join.r2i_col_mapping().composite(&join.i2o_col_mapping()); r2o.rewrite_provided_distribution(right) } }, @@ -99,11 +91,11 @@ impl BatchHashJoin { impl 
Distill for BatchHashJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&EqJoinPredicateDisplay { @@ -112,7 +104,7 @@ impl Distill for BatchHashJoin { }), )); if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } childless_record("BatchHashJoin", vec) @@ -121,18 +113,18 @@ impl Distill for BatchHashJoin { impl PlanTreeNodeBinary for BatchHashJoin { fn left(&self) -> PlanRef { - self.logical.left.clone() + self.core.left.clone() } fn right(&self) -> PlanRef { - self.logical.right.clone() + self.core.right.clone() } fn clone_with_left_right(&self, left: PlanRef, right: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = left; - logical.right = right; - Self::new(logical, self.eq_join_predicate.clone()) + let mut core = self.core.clone(); + core.left = left; + core.right = right; + Self::new(core, self.eq_join_predicate.clone()) } } @@ -200,7 +192,7 @@ impl ToDistributedBatch for BatchHashJoin { impl ToBatchPb for BatchHashJoin { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::HashJoin(HashJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, left_key: self .eq_join_predicate .left_eq_indexes() @@ -219,12 +211,7 @@ impl ToBatchPb for BatchHashJoin { .other_cond() .as_expr_unless_true() .map(|x| x.to_expr_proto()), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + output_indices: 
self.core.output_indices.iter().map(|&x| x as u32).collect(), }) } } @@ -246,8 +233,8 @@ impl ExprRewritable for BatchHashJoin { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.eq_join_predicate.rewrite_exprs(r)).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.eq_join_predicate.rewrite_exprs(r)).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_hop_window.rs b/src/frontend/src/optimizer/plan_node/batch_hop_window.rs index c4b84b7232d1a..2a4a27f9a0583 100644 --- a/src/frontend/src/optimizer/plan_node/batch_hop_window.rs +++ b/src/frontend/src/optimizer/plan_node/batch_hop_window.rs @@ -30,45 +30,42 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchHopWindow { pub base: PlanBase, - logical: generic::HopWindow, + core: generic::HopWindow, window_start_exprs: Vec, window_end_exprs: Vec, } impl BatchHopWindow { pub fn new( - logical: generic::HopWindow, + core: generic::HopWindow, window_start_exprs: Vec, window_end_exprs: Vec, ) -> Self { - let distribution = logical + let distribution = core .i2o_col_mapping() - .rewrite_provided_distribution(logical.input.distribution()); - let base = PlanBase::new_batch_from_logical( - &logical, - distribution, - logical.get_out_column_index_order(), - ); + .rewrite_provided_distribution(core.input.distribution()); + let base = + PlanBase::new_batch_with_core(&core, distribution, core.get_out_column_index_order()); BatchHopWindow { base, - logical, + core, window_start_exprs, window_end_exprs, } } } -impl_distill_by_unit!(BatchHopWindow, logical, "BatchHopWindow"); +impl_distill_by_unit!(BatchHopWindow, core, "BatchHopWindow"); impl PlanTreeNodeUnary for BatchHopWindow { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self 
{ - let mut logical = self.logical.clone(); - logical.input = input; + let mut core = self.core.clone(); + core.input = input; Self::new( - logical, + core, self.window_start_exprs.clone(), self.window_end_exprs.clone(), ) @@ -92,13 +89,13 @@ impl ToDistributedBatch for BatchHopWindow { // communication. // We pass the required dist to its input. let input_required = self - .logical + .core .o2i_col_mapping() .rewrite_required_distribution(required_dist); let new_input = self .input() .to_distributed_with_required(required_order, &input_required)?; - let mut new_logical = self.logical.clone(); + let mut new_logical = self.core.clone(); new_logical.input = new_input; let batch_plan = BatchHopWindow::new( new_logical, @@ -113,15 +110,10 @@ impl ToDistributedBatch for BatchHopWindow { impl ToBatchPb for BatchHopWindow { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::HopWindow(HopWindowNode { - time_col: self.logical.time_col.index() as _, - window_slide: Some(self.logical.window_slide.into()), - window_size: Some(self.logical.window_size.into()), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + time_col: self.core.time_col.index() as _, + window_slide: Some(self.core.window_slide.into()), + window_size: Some(self.core.window_size.into()), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), window_start_exprs: self .window_start_exprs .clone() @@ -152,7 +144,7 @@ impl ExprRewritable for BatchHopWindow { fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { Self::new( - self.logical.clone(), + self.core.clone(), self.window_start_exprs .clone() .into_iter() diff --git a/src/frontend/src/optimizer/plan_node/batch_insert.rs b/src/frontend/src/optimizer/plan_node/batch_insert.rs index 305de0e2f6eaa..aec05eee145b8 100644 --- a/src/frontend/src/optimizer/plan_node/batch_insert.rs +++ b/src/frontend/src/optimizer/plan_node/batch_insert.rs @@ -18,6 +18,7 @@ use 
risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::InsertNode; use risingwave_pb::plan_common::{DefaultColumns, IndexAndExpr}; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{generic, ExprRewritable, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch}; use crate::expr::Expr; @@ -28,40 +29,37 @@ use crate::optimizer::property::{Distribution, Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchInsert { pub base: PlanBase, - pub logical: generic::Insert, + pub core: generic::Insert, } impl BatchInsert { - pub fn new(logical: generic::Insert) -> Self { - assert_eq!(logical.input.distribution(), &Distribution::Single); - let base: PlanBase = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), - Order::any(), - ); + pub fn new(core: generic::Insert) -> Self { + assert_eq!(core.input.distribution(), &Distribution::Single); + let base: PlanBase = + PlanBase::new_batch_with_core(&core, core.input.distribution().clone(), Order::any()); - BatchInsert { base, logical } + BatchInsert { base, core } } } impl Distill for BatchInsert { fn distill<'a>(&self) -> XmlNode<'a> { let vec = self - .logical - .fields_pretty(self.base.ctx.is_explain_verbose()); + .core + .fields_pretty(self.base.ctx().is_explain_verbose()); childless_record("BatchInsert", vec) } } impl PlanTreeNodeUnary for BatchInsert { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -77,14 +75,9 @@ impl ToDistributedBatch for BatchInsert { impl ToBatchPb for BatchInsert { fn to_batch_prost_body(&self) -> NodeBody { - let column_indices = self - .logical - .column_indices - .iter() - .map(|&i| i as u32) - .collect(); + 
let column_indices = self.core.column_indices.iter().map(|&i| i as u32).collect(); - let default_columns = &self.logical.default_columns; + let default_columns = &self.core.default_columns; let has_default_columns = !default_columns.is_empty(); let default_columns = DefaultColumns { default_columns: default_columns @@ -96,16 +89,16 @@ impl ToBatchPb for BatchInsert { .collect(), }; NodeBody::Insert(InsertNode { - table_id: self.logical.table_id.table_id(), - table_version_id: self.logical.table_version_id, + table_id: self.core.table_id.table_id(), + table_version_id: self.core.table_version_id, column_indices, default_columns: if has_default_columns { Some(default_columns) } else { None }, - row_id_index: self.logical.row_id_index.map(|index| index as _), - returning: self.logical.returning, + row_id_index: self.core.row_id_index.map(|index| index as _), + returning: self.core.returning, }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_limit.rs b/src/frontend/src/optimizer/plan_node/batch_limit.rs index 5fe37b1713ecc..93b14d0198979 100644 --- a/src/frontend/src/optimizer/plan_node/batch_limit.rs +++ b/src/frontend/src/optimizer/plan_node/batch_limit.rs @@ -16,6 +16,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::LimitNode; +use super::generic::PhysicalPlanRef; use super::utils::impl_distill_by_unit; use super::{ generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch, @@ -27,23 +28,23 @@ use crate::optimizer::property::{Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchLimit { pub base: PlanBase, - logical: generic::Limit, + core: generic::Limit, } impl BatchLimit { - pub fn new(logical: generic::Limit) -> Self { - let base = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), - logical.input.order().clone(), + pub fn new(core: generic::Limit) -> Self { + let base = 
PlanBase::new_batch_with_core( + &core, + core.input.distribution().clone(), + core.input.order().clone(), ); - BatchLimit { base, logical } + BatchLimit { base, core } } - fn two_phase_limit(&self, input: PlanRef) -> Result { - let new_limit = self.logical.limit + self.logical.offset; + fn two_phase_limit(&self, new_input: PlanRef) -> Result { + let new_limit = self.core.limit + self.core.offset; let new_offset = 0; - let logical_partial_limit = generic::Limit::new(input, new_limit, new_offset); + let logical_partial_limit = generic::Limit::new(new_input.clone(), new_limit, new_offset); let batch_partial_limit = Self::new(logical_partial_limit); let any_order = Order::any(); @@ -52,7 +53,7 @@ impl BatchLimit { single_dist.enforce_if_not_satisfies(batch_partial_limit.into(), &any_order)? } else { // The input's distribution is singleton, so use one phase limit is enough. - return Ok(batch_partial_limit.into()); + return Ok(self.clone_with_input(new_input).into()); }; let batch_global_limit = self.clone_with_input(ensure_single_dist); @@ -60,27 +61,27 @@ impl BatchLimit { } pub fn limit(&self) -> u64 { - self.logical.limit + self.core.limit } pub fn offset(&self) -> u64 { - self.logical.offset + self.core.offset } } impl PlanTreeNodeUnary for BatchLimit { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut core = self.logical.clone(); + let mut core = self.core.clone(); core.input = input; Self::new(core) } } impl_plan_tree_node_for_unary! 
{BatchLimit} -impl_distill_by_unit!(BatchLimit, logical, "BatchLimit"); +impl_distill_by_unit!(BatchLimit, core, "BatchLimit"); impl ToDistributedBatch for BatchLimit { fn to_distributed(&self) -> Result { @@ -91,8 +92,8 @@ impl ToDistributedBatch for BatchLimit { impl ToBatchPb for BatchLimit { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::Limit(LimitNode { - limit: self.logical.limit, - offset: self.logical.offset, + limit: self.core.limit, + offset: self.core.offset, }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_lookup_join.rs b/src/frontend/src/optimizer/plan_node/batch_lookup_join.rs index 3098019499b76..48f99668c3af7 100644 --- a/src/frontend/src/optimizer/plan_node/batch_lookup_join.rs +++ b/src/frontend/src/optimizer/plan_node/batch_lookup_join.rs @@ -18,7 +18,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::{DistributedLookupJoinNode, LocalLookupJoinNode}; -use super::generic::{self}; +use super::generic::{self, GenericPlanRef}; use super::utils::{childless_record, Distill}; use super::ExprRewritable; use crate::expr::{Expr, ExprRewriter}; @@ -34,7 +34,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchLookupJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, /// The join condition must be equivalent to `logical.on`, but separated into equal and /// non-equal parts to facilitate execution later @@ -56,7 +56,7 @@ pub struct BatchLookupJoin { impl BatchLookupJoin { pub fn new( - logical: generic::Join, + core: generic::Join, eq_join_predicate: EqJoinPredicate, right_table_desc: TableDesc, right_output_column_ids: Vec, @@ -67,11 +67,11 @@ impl BatchLookupJoin { // lookup. 
assert!(eq_join_predicate.has_eq()); assert!(eq_join_predicate.eq_keys_are_type_aligned()); - let dist = Self::derive_dist(logical.left.distribution(), &logical); - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); + let dist = Self::derive_dist(core.left.distribution(), &core); + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); Self { base, - logical, + core, eq_join_predicate, right_table_desc, right_output_column_ids, @@ -80,13 +80,11 @@ impl BatchLookupJoin { } } - fn derive_dist(left: &Distribution, logical: &generic::Join) -> Distribution { + fn derive_dist(left: &Distribution, core: &generic::Join) -> Distribution { match left { Distribution::Single => Distribution::Single, Distribution::HashShard(_) | Distribution::UpstreamHashShard(_, _) => { - let l2o = logical - .l2i_col_mapping() - .composite(&logical.i2o_col_mapping()); + let l2o = core.l2i_col_mapping().composite(&core.i2o_col_mapping()); l2o.rewrite_provided_distribution(left) } _ => unreachable!(), @@ -114,11 +112,11 @@ impl BatchLookupJoin { impl Distill for BatchLookupJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&EqJoinPredicateDisplay { @@ -128,7 +126,7 @@ impl Distill for BatchLookupJoin { )); if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } @@ -138,15 +136,15 @@ impl Distill for BatchLookupJoin { impl PlanTreeNodeUnary for BatchLookupJoin { fn input(&self) -> PlanRef { - self.logical.left.clone() + 
self.core.left.clone() } // Only change left side fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = input; + let mut core = self.core.clone(); + core.left = input; Self::new( - logical, + core, self.eq_join_predicate.clone(), self.right_table_desc.clone(), self.right_output_column_ids.clone(), @@ -199,7 +197,7 @@ impl ToBatchPb for BatchLookupJoin { fn to_batch_prost_body(&self) -> NodeBody { if self.distributed_lookup { NodeBody::DistributedLookupJoin(DistributedLookupJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, condition: self .eq_join_predicate .other_cond() @@ -223,18 +221,13 @@ impl ToBatchPb for BatchLookupJoin { .iter() .map(ColumnId::get_id) .collect(), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), null_safe: self.eq_join_predicate.null_safes(), lookup_prefix_len: self.lookup_prefix_len as u32, }) } else { NodeBody::LocalLookupJoin(LocalLookupJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, condition: self .eq_join_predicate .other_cond() @@ -259,12 +252,7 @@ impl ToBatchPb for BatchLookupJoin { .iter() .map(ColumnId::get_id) .collect(), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), worker_nodes: vec![], // To be filled in at local.rs null_safe: self.eq_join_predicate.null_safes(), lookup_prefix_len: self.lookup_prefix_len as u32, @@ -289,11 +277,11 @@ impl ExprRewritable for BatchLookupJoin { fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { let base = self.base.clone_with_new_plan_id(); - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); + let mut core = self.core.clone(); + core.rewrite_exprs(r); Self { 
base, - logical, + core, eq_join_predicate: self.eq_join_predicate.rewrite_exprs(r), ..Self::clone(self) } diff --git a/src/frontend/src/optimizer/plan_node/batch_nested_loop_join.rs b/src/frontend/src/optimizer/plan_node/batch_nested_loop_join.rs index 79d9f07d8eadc..8980ad2f23f6d 100644 --- a/src/frontend/src/optimizer/plan_node/batch_nested_loop_join.rs +++ b/src/frontend/src/optimizer/plan_node/batch_nested_loop_join.rs @@ -17,7 +17,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::NestedLoopJoinNode; -use super::generic::{self}; +use super::generic::{self, GenericPlanRef}; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeBinary, ToBatchPb, ToDistributedBatch}; use crate::expr::{Expr, ExprImpl, ExprRewriter}; @@ -31,14 +31,14 @@ use crate::utils::ConditionDisplay; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchNestedLoopJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, } impl BatchNestedLoopJoin { - pub fn new(logical: generic::Join) -> Self { - let dist = Self::derive_dist(logical.left.distribution(), logical.right.distribution()); - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); - Self { base, logical } + pub fn new(core: generic::Join) -> Self { + let dist = Self::derive_dist(core.left.distribution(), core.right.distribution()); + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); + Self { base, core } } fn derive_dist(left: &Distribution, right: &Distribution) -> Distribution { @@ -51,21 +51,21 @@ impl BatchNestedLoopJoin { impl Distill for BatchNestedLoopJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + 
vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&ConditionDisplay { - condition: &self.logical.on, + condition: &self.core.on, input_schema: &concat_schema, }), )); if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } @@ -75,18 +75,18 @@ impl Distill for BatchNestedLoopJoin { impl PlanTreeNodeBinary for BatchNestedLoopJoin { fn left(&self) -> PlanRef { - self.logical.left.clone() + self.core.left.clone() } fn right(&self) -> PlanRef { - self.logical.right.clone() + self.core.right.clone() } fn clone_with_left_right(&self, left: PlanRef, right: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = left; - logical.right = right; - Self::new(logical) + let mut core = self.core.clone(); + core.left = left; + core.right = right; + Self::new(core) } } @@ -108,14 +108,9 @@ impl ToDistributedBatch for BatchNestedLoopJoin { impl ToBatchPb for BatchNestedLoopJoin { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::NestedLoopJoin(NestedLoopJoinNode { - join_type: self.logical.join_type as i32, - join_cond: Some(ExprImpl::from(self.logical.on.clone()).to_expr_proto()), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + join_type: self.core.join_type as i32, + join_cond: Some(ExprImpl::from(self.core.on.clone()).to_expr_proto()), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), }) } } @@ -138,8 +133,8 @@ impl ExprRewritable for BatchNestedLoopJoin { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } 
diff --git a/src/frontend/src/optimizer/plan_node/batch_over_window.rs b/src/frontend/src/optimizer/plan_node/batch_over_window.rs index aa6e53246697e..fb455758f331a 100644 --- a/src/frontend/src/optimizer/plan_node/batch_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/batch_over_window.rs @@ -17,6 +17,7 @@ use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::SortOverWindowNode; +use super::batch::BatchPlanRef; use super::generic::PlanWindowFunction; use super::utils::impl_distill_by_unit; use super::{ @@ -28,27 +29,26 @@ use crate::optimizer::property::{Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchOverWindow { pub base: PlanBase, - logical: generic::OverWindow, + core: generic::OverWindow, } impl BatchOverWindow { - pub fn new(logical: generic::OverWindow) -> Self { - assert!(logical.funcs_have_same_partition_and_order()); + pub fn new(core: generic::OverWindow) -> Self { + assert!(core.funcs_have_same_partition_and_order()); - let input = &logical.input; + let input = &core.input; let input_dist = input.distribution().clone(); let order = Order::new( - logical - .partition_key_indices() + core.partition_key_indices() .into_iter() .map(|idx| ColumnOrder::new(idx, OrderType::default())) - .chain(logical.order_key().iter().cloned()) + .chain(core.order_key().iter().cloned()) .collect(), ); - let base = PlanBase::new_batch_from_logical(&logical, input_dist, order); - BatchOverWindow { base, logical } + let base = PlanBase::new_batch_with_core(&core, input_dist, order); + BatchOverWindow { base, core } } fn expected_input_order(&self) -> Order { @@ -56,17 +56,17 @@ impl BatchOverWindow { } } -impl_distill_by_unit!(BatchOverWindow, logical, "BatchOverWindow"); +impl_distill_by_unit!(BatchOverWindow, core, "BatchOverWindow"); impl PlanTreeNodeUnary for BatchOverWindow { fn input(&self) -> PlanRef { - self.logical.input.clone() + 
self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -78,7 +78,7 @@ impl ToDistributedBatch for BatchOverWindow { &self.expected_input_order(), &RequiredDist::shard_by_key( self.input().schema().len(), - &self.logical.partition_key_indices(), + &self.core.partition_key_indices(), ), )?; Ok(self.clone_with_input(new_input).into()) @@ -98,13 +98,13 @@ impl ToBatchPb for BatchOverWindow { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::SortOverWindow(SortOverWindowNode { calls: self - .logical + .core .window_functions() .iter() .map(PlanWindowFunction::to_protobuf) .collect(), partition_by: self - .logical + .core .partition_key_indices() .into_iter() .map(|idx| idx as _) diff --git a/src/frontend/src/optimizer/plan_node/batch_project.rs b/src/frontend/src/optimizer/plan_node/batch_project.rs index d3979b8aebdee..642683967c5c3 100644 --- a/src/frontend/src/optimizer/plan_node/batch_project.rs +++ b/src/frontend/src/optimizer/plan_node/batch_project.rs @@ -18,6 +18,7 @@ use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::ProjectNode; use risingwave_pb::expr::ExprNode; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch, @@ -31,46 +32,46 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchProject { pub base: PlanBase, - logical: generic::Project, + core: generic::Project, } impl BatchProject { - pub fn new(logical: generic::Project) -> Self { - let distribution = logical + pub fn new(core: generic::Project) -> Self { + let distribution = core .i2o_col_mapping() - .rewrite_provided_distribution(logical.input.distribution()); - let order = logical + 
.rewrite_provided_distribution(core.input.distribution()); + let order = core .i2o_col_mapping() - .rewrite_provided_order(logical.input.order()); + .rewrite_provided_order(core.input.order()); - let base = PlanBase::new_batch_from_logical(&logical, distribution, order); - BatchProject { base, logical } + let base = PlanBase::new_batch_with_core(&core, distribution, order); + BatchProject { base, core } } pub fn as_logical(&self) -> &generic::Project { - &self.logical + &self.core } pub fn exprs(&self) -> &Vec { - &self.logical.exprs + &self.core.exprs } } impl Distill for BatchProject { fn distill<'a>(&self) -> XmlNode<'a> { - childless_record("BatchProject", self.logical.fields_pretty(self.schema())) + childless_record("BatchProject", self.core.fields_pretty(self.schema())) } } impl PlanTreeNodeUnary for BatchProject { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -86,7 +87,7 @@ impl ToDistributedBatch for BatchProject { impl ToBatchPb for BatchProject { fn to_batch_prost_body(&self) -> NodeBody { let select_list = self - .logical + .core .exprs .iter() .map(|expr| expr.to_expr_proto()) @@ -108,8 +109,8 @@ impl ExprRewritable for BatchProject { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_project_set.rs b/src/frontend/src/optimizer/plan_node/batch_project_set.rs index b86211aaaa211..5888df9d15889 100644 --- a/src/frontend/src/optimizer/plan_node/batch_project_set.rs +++ b/src/frontend/src/optimizer/plan_node/batch_project_set.rs @@ -29,35 +29,32 
@@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchProjectSet { pub base: PlanBase, - logical: generic::ProjectSet, + core: generic::ProjectSet, } impl BatchProjectSet { - pub fn new(logical: generic::ProjectSet) -> Self { - let distribution = logical + pub fn new(core: generic::ProjectSet) -> Self { + let distribution = core .i2o_col_mapping() - .rewrite_provided_distribution(logical.input.distribution()); + .rewrite_provided_distribution(core.input.distribution()); - let base = PlanBase::new_batch_from_logical( - &logical, - distribution, - logical.get_out_column_index_order(), - ); - BatchProjectSet { base, logical } + let base = + PlanBase::new_batch_with_core(&core, distribution, core.get_out_column_index_order()); + BatchProjectSet { base, core } } } -impl_distill_by_unit!(BatchProjectSet, logical, "BatchProjectSet"); +impl_distill_by_unit!(BatchProjectSet, core, "BatchProjectSet"); impl PlanTreeNodeUnary for BatchProjectSet { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -76,7 +73,7 @@ impl ToBatchPb for BatchProjectSet { fn to_batch_prost_body(&self) -> NodeBody { NodeBody::ProjectSet(ProjectSetNode { select_list: self - .logical + .core .select_list .iter() .map(|select_item| select_item.to_project_set_select_item_proto()) @@ -98,8 +95,8 @@ impl ExprRewritable for BatchProjectSet { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_seq_scan.rs 
b/src/frontend/src/optimizer/plan_node/batch_seq_scan.rs index cfc557fe375c6..6834ed29353b9 100644 --- a/src/frontend/src/optimizer/plan_node/batch_seq_scan.rs +++ b/src/frontend/src/optimizer/plan_node/batch_seq_scan.rs @@ -24,6 +24,8 @@ use risingwave_pb::batch_plan::row_seq_scan_node::ChunkSize; use risingwave_pb::batch_plan::{RowSeqScanNode, SysRowSeqScanNode}; use risingwave_pb::plan_common::PbColumnDesc; +use super::batch::BatchPlanRef; +use super::generic::{GenericPlanRef, PhysicalPlanRef}; use super::utils::{childless_record, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanRef, ToBatchPb, ToDistributedBatch}; use crate::catalog::ColumnId; @@ -35,25 +37,25 @@ use crate::optimizer::property::{Distribution, DistributionDisplay, Order}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchSeqScan { pub base: PlanBase, - logical: generic::Scan, + core: generic::Scan, scan_ranges: Vec, } impl BatchSeqScan { - fn new_inner(logical: generic::Scan, dist: Distribution, scan_ranges: Vec) -> Self { + fn new_inner(core: generic::Scan, dist: Distribution, scan_ranges: Vec) -> Self { let order = if scan_ranges.len() > 1 { Order::any() } else { - logical.get_out_column_index_order() + core.get_out_column_index_order() }; - let base = PlanBase::new_batch_from_logical(&logical, dist, order); + let base = PlanBase::new_batch_with_core(&core, dist, order); { // validate scan_range scan_ranges.iter().for_each(|scan_range| { assert!(!scan_range.is_full_table_scan()); let scan_pk_prefix_len = scan_range.eq_conds.len(); - let order_len = logical.table_desc.order_column_indices().len(); + let order_len = core.table_desc.order_column_indices().len(); assert!( scan_pk_prefix_len < order_len || (scan_pk_prefix_len == order_len && is_full_range(&scan_range.range)), @@ -64,23 +66,23 @@ impl BatchSeqScan { Self { base, - logical, + core, scan_ranges, } } - pub fn new(logical: generic::Scan, scan_ranges: Vec) -> Self { + pub fn new(core: generic::Scan, scan_ranges: 
Vec) -> Self { // Use `Single` by default, will be updated later with `clone_with_dist`. - Self::new_inner(logical, Distribution::Single, scan_ranges) + Self::new_inner(core, Distribution::Single, scan_ranges) } fn clone_with_dist(&self) -> Self { Self::new_inner( - self.logical.clone(), - if self.logical.is_sys_table { + self.core.clone(), + if self.core.is_sys_table { Distribution::Single } else { - match self.logical.distribution_key() { + match self.core.distribution_key() { None => Distribution::SomeShard, Some(distribution_key) => { if distribution_key.is_empty() { @@ -97,7 +99,7 @@ impl BatchSeqScan { // inserted. Distribution::UpstreamHashShard( distribution_key, - self.logical.table_desc.table_id, + self.core.table_desc.table_id, ) } } @@ -109,8 +111,8 @@ impl BatchSeqScan { /// Get a reference to the batch seq scan's logical. #[must_use] - pub fn logical(&self) -> &generic::Scan { - &self.logical + pub fn core(&self) -> &generic::Scan { + &self.core } pub fn scan_ranges(&self) -> &[ScanRange] { @@ -119,8 +121,8 @@ impl BatchSeqScan { fn scan_ranges_as_strs(&self, verbose: bool) -> Vec { let order_names = match verbose { - true => self.logical.order_names_with_table_prefix(), - false => self.logical.order_names(), + true => self.core.order_names_with_table_prefix(), + false => self.core.order_names(), }; let mut range_strs = vec![]; @@ -180,10 +182,10 @@ fn range_to_string(name: &str, range: &(Bound, Bound)) - impl Distill for BatchSeqScan { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(4); - vec.push(("table", Pretty::from(self.logical.table_name.clone()))); - vec.push(("columns", self.logical.columns_pretty(verbose))); + vec.push(("table", Pretty::from(self.core.table_name.clone()))); + vec.push(("columns", self.core.columns_pretty(verbose))); if !self.scan_ranges.is_empty() { let range_strs = 
self.scan_ranges_as_strs(verbose); @@ -196,7 +198,7 @@ impl Distill for BatchSeqScan { if verbose { let dist = Pretty::display(&DistributionDisplay { distribution: self.distribution(), - input_schema: &self.base.schema, + input_schema: self.base.schema(), }); vec.push(("distribution", dist)); } @@ -214,22 +216,22 @@ impl ToDistributedBatch for BatchSeqScan { impl ToBatchPb for BatchSeqScan { fn to_batch_prost_body(&self) -> NodeBody { let column_descs = self - .logical + .core .column_descs() .iter() .map(PbColumnDesc::from) .collect(); - if self.logical.is_sys_table { + if self.core.is_sys_table { NodeBody::SysRowSeqScan(SysRowSeqScanNode { - table_id: self.logical.table_desc.table_id.table_id, + table_id: self.core.table_desc.table_id.table_id, column_descs, }) } else { NodeBody::RowSeqScan(RowSeqScanNode { - table_desc: Some(self.logical.table_desc.to_protobuf()), + table_desc: Some(self.core.table_desc.to_protobuf()), column_ids: self - .logical + .core .output_column_ids() .iter() .map(ColumnId::get_id) @@ -239,7 +241,7 @@ impl ToBatchPb for BatchSeqScan { vnode_bitmap: None, ordered: !self.order().is_any(), chunk_size: self - .logical + .core .chunk_size .map(|chunk_size| ChunkSize { chunk_size }), }) @@ -249,18 +251,18 @@ impl ToBatchPb for BatchSeqScan { impl ToLocalBatch for BatchSeqScan { fn to_local(&self) -> Result { - let dist = if self.logical.is_sys_table { + let dist = if self.core.is_sys_table { Distribution::Single - } else if let Some(distribution_key) = self.logical.distribution_key() + } else if let Some(distribution_key) = self.core.distribution_key() && !distribution_key.is_empty() { - Distribution::UpstreamHashShard(distribution_key, self.logical.table_desc.table_id) + Distribution::UpstreamHashShard(distribution_key, self.core.table_desc.table_id) } else { // NOTE(kwannoel): This is a hack to force an exchange to always be inserted before // scan. 
Distribution::SomeShard }; - Ok(Self::new_inner(self.logical.clone(), dist, self.scan_ranges.clone()).into()) + Ok(Self::new_inner(self.core.clone(), dist, self.scan_ranges.clone()).into()) } } @@ -270,8 +272,8 @@ impl ExprRewritable for BatchSeqScan { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.scan_ranges.clone()).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.scan_ranges.clone()).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_simple_agg.rs b/src/frontend/src/optimizer/plan_node/batch_simple_agg.rs index b414779385200..bae8d70c2eedf 100644 --- a/src/frontend/src/optimizer/plan_node/batch_simple_agg.rs +++ b/src/frontend/src/optimizer/plan_node/batch_simple_agg.rs @@ -16,7 +16,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::SortAggNode; -use super::generic::{self, PlanAggCall}; +use super::generic::{self, GenericPlanRef, PlanAggCall}; use super::utils::impl_distill_by_unit; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch}; use crate::expr::ExprRewriter; @@ -26,44 +26,47 @@ use crate::optimizer::property::{Distribution, Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchSimpleAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, } impl BatchSimpleAgg { - pub fn new(logical: generic::Agg) -> Self { - let input_dist = logical.input.distribution().clone(); - let base = PlanBase::new_batch_from_logical(&logical, input_dist, Order::any()); - BatchSimpleAgg { base, logical } + pub fn new(core: generic::Agg) -> Self { + let input_dist = core.input.distribution().clone(); + let base = PlanBase::new_batch_with_core(&core, input_dist, Order::any()); + BatchSimpleAgg { base, core } } pub fn agg_calls(&self) -> &[PlanAggCall] 
{ - &self.logical.agg_calls + &self.core.agg_calls } fn two_phase_agg_enabled(&self) -> bool { - let session_ctx = self.base.ctx.session_ctx(); - session_ctx.config().get_enable_two_phase_agg() + self.base + .ctx() + .session_ctx() + .config() + .get_enable_two_phase_agg() } pub(crate) fn can_two_phase_agg(&self) -> bool { - self.logical.can_two_phase_agg() && self.two_phase_agg_enabled() + self.core.can_two_phase_agg() && self.two_phase_agg_enabled() } } impl PlanTreeNodeUnary for BatchSimpleAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { Self::new(generic::Agg { input, - ..self.logical.clone() + ..self.core.clone() }) } } impl_plan_tree_node_for_unary! { BatchSimpleAgg } -impl_distill_by_unit!(BatchSimpleAgg, logical, "BatchSimpleAgg"); +impl_distill_by_unit!(BatchSimpleAgg, core, "BatchSimpleAgg"); impl ToDistributedBatch for BatchSimpleAgg { fn to_distributed(&self) -> Result { @@ -83,7 +86,7 @@ impl ToDistributedBatch for BatchSimpleAgg { // insert total agg let total_agg_types = self - .logical + .core .agg_calls .iter() .enumerate() @@ -92,7 +95,7 @@ impl ToDistributedBatch for BatchSimpleAgg { }) .collect(); let total_agg_logical = - generic::Agg::new(total_agg_types, self.logical.group_key.clone(), exchange); + generic::Agg::new(total_agg_types, self.core.group_key.clone(), exchange); Ok(BatchSimpleAgg::new(total_agg_logical).into()) } else { let new_input = self @@ -134,8 +137,8 @@ impl ExprRewritable for BatchSimpleAgg { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_sort.rs b/src/frontend/src/optimizer/plan_node/batch_sort.rs index 8576a18c19333..e7bff6d51d85b 100644 --- 
a/src/frontend/src/optimizer/plan_node/batch_sort.rs +++ b/src/frontend/src/optimizer/plan_node/batch_sort.rs @@ -17,6 +17,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::SortNode; +use super::batch::BatchPlanRef; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch}; use crate::optimizer::plan_node::ToLocalBatch; @@ -56,7 +57,7 @@ impl PlanTreeNodeUnary for BatchSort { } fn clone_with_input(&self, input: PlanRef) -> Self { - Self::new(input, self.base.order.clone()) + Self::new(input, self.base.order().clone()) } } impl_plan_tree_node_for_unary! {BatchSort} @@ -70,7 +71,7 @@ impl ToDistributedBatch for BatchSort { impl ToBatchPb for BatchSort { fn to_batch_prost_body(&self) -> NodeBody { - let column_orders = self.base.order.to_protobuf(); + let column_orders = self.base.order().to_protobuf(); NodeBody::Sort(SortNode { column_orders }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_sort_agg.rs b/src/frontend/src/optimizer/plan_node/batch_sort_agg.rs index 241f1195352e3..2252d4c0c0ee0 100644 --- a/src/frontend/src/optimizer/plan_node/batch_sort_agg.rs +++ b/src/frontend/src/optimizer/plan_node/batch_sort_agg.rs @@ -28,18 +28,18 @@ use crate::utils::{ColIndexMappingRewriteExt, IndexSet}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchSortAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, input_order: Order, } impl BatchSortAgg { - pub fn new(logical: generic::Agg) -> Self { - assert!(!logical.group_key.is_empty()); - assert!(logical.input_provides_order_on_group_keys()); + pub fn new(core: generic::Agg) -> Self { + assert!(!core.group_key.is_empty()); + assert!(core.input_provides_order_on_group_keys()); - let input = logical.input.clone(); + let input = core.input.clone(); let input_dist = input.distribution(); - let dist = logical + let dist = core 
.i2o_col_mapping() .rewrite_provided_distribution(input_dist); let input_order = Order { @@ -47,46 +47,44 @@ impl BatchSortAgg { .order() .column_orders .iter() - .filter(|o| logical.group_key.indices().any(|g_k| g_k == o.column_index)) + .filter(|o| core.group_key.indices().any(|g_k| g_k == o.column_index)) .cloned() .collect(), }; - let order = logical - .i2o_col_mapping() - .rewrite_provided_order(&input_order); + let order = core.i2o_col_mapping().rewrite_provided_order(&input_order); - let base = PlanBase::new_batch_from_logical(&logical, dist, order); + let base = PlanBase::new_batch_with_core(&core, dist, order); BatchSortAgg { base, - logical, + core, input_order, } } pub fn agg_calls(&self) -> &[PlanAggCall] { - &self.logical.agg_calls + &self.core.agg_calls } pub fn group_key(&self) -> &IndexSet { - &self.logical.group_key + &self.core.group_key } } impl PlanTreeNodeUnary for BatchSortAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! 
{ BatchSortAgg } -impl_distill_by_unit!(BatchSortAgg, logical, "BatchSortAgg"); +impl_distill_by_unit!(BatchSortAgg, core, "BatchSortAgg"); impl ToDistributedBatch for BatchSortAgg { fn to_distributed(&self) -> Result { @@ -136,7 +134,7 @@ impl ExprRewritable for BatchSortAgg { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut new_logical = self.logical.clone(); + let mut new_logical = self.core.clone(); new_logical.rewrite_exprs(r); Self::new(new_logical).into() } diff --git a/src/frontend/src/optimizer/plan_node/batch_source.rs b/src/frontend/src/optimizer/plan_node/batch_source.rs index 3adfbf670343a..9e2cd6006db0b 100644 --- a/src/frontend/src/optimizer/plan_node/batch_source.rs +++ b/src/frontend/src/optimizer/plan_node/batch_source.rs @@ -19,6 +19,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::SourceNode; +use super::generic::GenericPlanRef; use super::utils::{childless_record, column_names_pretty, Distill}; use super::{ generic, ExprRewritable, PlanBase, PlanRef, ToBatchPb, ToDistributedBatch, ToLocalBatch, @@ -30,19 +31,19 @@ use crate::optimizer::property::{Distribution, Order}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchSource { pub base: PlanBase, - logical: generic::Source, + core: generic::Source, } impl BatchSource { - pub fn new(logical: generic::Source) -> Self { - let base = PlanBase::new_batch_from_logical( - &logical, + pub fn new(core: generic::Source) -> Self { + let base = PlanBase::new_batch_with_core( + &core, // Use `Single` by default, will be updated later with `clone_with_dist`. 
Distribution::Single, Order::any(), ); - Self { base, logical } + Self { base, core } } pub fn column_names(&self) -> Vec<&str> { @@ -50,19 +51,20 @@ impl BatchSource { } pub fn source_catalog(&self) -> Option> { - self.logical.catalog.clone() + self.core.catalog.clone() } pub fn kafka_timestamp_range_value(&self) -> (Option, Option) { - self.logical.kafka_timestamp_range_value() + self.core.kafka_timestamp_range_value() } pub fn clone_with_dist(&self) -> Self { - let mut base = self.base.clone(); - base.dist = Distribution::SomeShard; + let base = self + .base + .clone_with_new_distribution(Distribution::SomeShard); Self { base, - logical: self.logical.clone(), + core: self.core.clone(), } } } @@ -100,7 +102,7 @@ impl ToBatchPb for BatchSource { source_id: source_catalog.id, info: Some(source_catalog.info.clone()), columns: self - .logical + .core .column_catalog .iter() .map(|c| c.to_protobuf()) diff --git a/src/frontend/src/optimizer/plan_node/batch_table_function.rs b/src/frontend/src/optimizer/plan_node/batch_table_function.rs index 91aa1af0abbe7..0b9887cd4aaba 100644 --- a/src/frontend/src/optimizer/plan_node/batch_table_function.rs +++ b/src/frontend/src/optimizer/plan_node/batch_table_function.rs @@ -17,6 +17,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::TableFunctionNode; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeLeaf, ToBatchPb, ToDistributedBatch}; use crate::expr::ExprRewriter; @@ -39,7 +40,7 @@ impl BatchTableFunction { } pub fn with_dist(logical: LogicalTableFunction, dist: Distribution) -> Self { - let ctx = logical.base.ctx.clone(); + let ctx = logical.base.ctx().clone(); let base = PlanBase::new_batch(ctx, logical.schema().clone(), dist, Order::any()); BatchTableFunction { base, logical } } diff --git a/src/frontend/src/optimizer/plan_node/batch_topn.rs 
b/src/frontend/src/optimizer/plan_node/batch_topn.rs index b8b5ba710e468..b2eda24046d28 100644 --- a/src/frontend/src/optimizer/plan_node/batch_topn.rs +++ b/src/frontend/src/optimizer/plan_node/batch_topn.rs @@ -29,38 +29,34 @@ use crate::optimizer::property::{Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchTopN { pub base: PlanBase, - logical: generic::TopN, + core: generic::TopN, } impl BatchTopN { - pub fn new(logical: generic::TopN) -> Self { - assert!(logical.group_key.is_empty()); - let base = PlanBase::new_batch_from_logical( - &logical, - logical.input.distribution().clone(), + pub fn new(core: generic::TopN) -> Self { + assert!(core.group_key.is_empty()); + let base = PlanBase::new_batch_with_core( + &core, + core.input.distribution().clone(), // BatchTopN outputs data in the order of specified order - logical.order.clone(), + core.order.clone(), ); - BatchTopN { base, logical } + BatchTopN { base, core } } fn two_phase_topn(&self, input: PlanRef) -> Result { let new_limit = TopNLimit::new( - self.logical.limit_attr.limit() + self.logical.offset, - self.logical.limit_attr.with_ties(), + self.core.limit_attr.limit() + self.core.offset, + self.core.limit_attr.with_ties(), ); let new_offset = 0; - let partial_input: PlanRef = if input.order().satisfies(&self.logical.order) { + let partial_input: PlanRef = if input.order().satisfies(&self.core.order) { let logical_partial_limit = generic::Limit::new(input, new_limit.limit(), new_offset); let batch_partial_limit = BatchLimit::new(logical_partial_limit); batch_partial_limit.into() } else { - let logical_partial_topn = generic::TopN::without_group( - input, - new_limit, - new_offset, - self.logical.order.clone(), - ); + let logical_partial_topn = + generic::TopN::without_group(input, new_limit, new_offset, self.core.order.clone()); let batch_partial_topn = Self::new(logical_partial_topn); batch_partial_topn.into() }; @@ -78,17 +74,17 @@ impl BatchTopN { } } 
-impl_distill_by_unit!(BatchTopN, logical, "BatchTopN"); +impl_distill_by_unit!(BatchTopN, core, "BatchTopN"); impl PlanTreeNodeUnary for BatchTopN { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -102,12 +98,12 @@ impl ToDistributedBatch for BatchTopN { impl ToBatchPb for BatchTopN { fn to_batch_prost_body(&self) -> NodeBody { - let column_orders = self.logical.order.to_protobuf(); + let column_orders = self.core.order.to_protobuf(); NodeBody::TopN(TopNNode { - limit: self.logical.limit_attr.limit(), - offset: self.logical.offset, + limit: self.core.limit_attr.limit(), + offset: self.core.offset, column_orders, - with_ties: self.logical.limit_attr.with_ties(), + with_ties: self.core.limit_attr.with_ties(), }) } } diff --git a/src/frontend/src/optimizer/plan_node/batch_union.rs b/src/frontend/src/optimizer/plan_node/batch_union.rs index 1626d32db2cc8..c7c71111174c6 100644 --- a/src/frontend/src/optimizer/plan_node/batch_union.rs +++ b/src/frontend/src/optimizer/plan_node/batch_union.rs @@ -25,12 +25,12 @@ use crate::optimizer::property::{Distribution, Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchUnion { pub base: PlanBase, - logical: generic::Union, + core: generic::Union, } impl BatchUnion { - pub fn new(logical: generic::Union) -> Self { - let dist = if logical + pub fn new(core: generic::Union) -> Self { + let dist = if core .inputs .iter() .all(|input| *input.distribution() == Distribution::Single) @@ -40,21 +40,21 @@ impl BatchUnion { Distribution::SomeShard }; - let base = PlanBase::new_batch_from_logical(&logical, dist, Order::any()); - BatchUnion { base, logical } + let base = PlanBase::new_batch_with_core(&core, dist, Order::any()); + BatchUnion { base, core } 
} } -impl_distill_by_unit!(BatchUnion, logical, "BatchUnion"); +impl_distill_by_unit!(BatchUnion, core, "BatchUnion"); impl PlanTreeNode for BatchUnion { fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { - smallvec::SmallVec::from_vec(self.logical.inputs.clone()) + smallvec::SmallVec::from_vec(self.core.inputs.clone()) } fn clone_with_inputs(&self, inputs: &[crate::optimizer::PlanRef]) -> PlanRef { // For batch query, we don't need to clone `source_col`, so just use new. - let mut new = self.logical.clone(); + let mut new = self.core.clone(); new.inputs = inputs.to_vec(); Self::new(new).into() } diff --git a/src/frontend/src/optimizer/plan_node/batch_update.rs b/src/frontend/src/optimizer/plan_node/batch_update.rs index 19bb60b9aa1d8..20e4b8b6b966c 100644 --- a/src/frontend/src/optimizer/plan_node/batch_update.rs +++ b/src/frontend/src/optimizer/plan_node/batch_update.rs @@ -18,6 +18,7 @@ use risingwave_common::error::Result; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::UpdateNode; +use super::generic::GenericPlanRef; use super::utils::impl_distill_by_unit; use super::{ generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, ToBatchPb, ToDistributedBatch, @@ -30,32 +31,32 @@ use crate::optimizer::property::{Distribution, Order, RequiredDist}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct BatchUpdate { pub base: PlanBase, - pub logical: generic::Update, + pub core: generic::Update, } impl BatchUpdate { - pub fn new(logical: generic::Update, schema: Schema) -> Self { - assert_eq!(logical.input.distribution(), &Distribution::Single); - let ctx = logical.input.ctx(); + pub fn new(core: generic::Update, schema: Schema) -> Self { + assert_eq!(core.input.distribution(), &Distribution::Single); + let ctx = core.input.ctx(); let base = PlanBase::new_batch(ctx, schema, Distribution::Single, Order::any()); - Self { base, logical } + Self { base, core } } } impl PlanTreeNodeUnary for BatchUpdate { fn 
input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical, self.schema().clone()) + let mut core = self.core.clone(); + core.input = input; + Self::new(core, self.schema().clone()) } } impl_plan_tree_node_for_unary! { BatchUpdate } -impl_distill_by_unit!(BatchUpdate, logical, "BatchUpdate"); +impl_distill_by_unit!(BatchUpdate, core, "BatchUpdate"); impl ToDistributedBatch for BatchUpdate { fn to_distributed(&self) -> Result { @@ -67,24 +68,19 @@ impl ToDistributedBatch for BatchUpdate { impl ToBatchPb for BatchUpdate { fn to_batch_prost_body(&self) -> NodeBody { - let exprs = self - .logical - .exprs - .iter() - .map(|x| x.to_expr_proto()) - .collect(); + let exprs = self.core.exprs.iter().map(|x| x.to_expr_proto()).collect(); let update_column_indices = self - .logical + .core .update_column_indices .iter() .map(|i| *i as _) .collect_vec(); NodeBody::Update(UpdateNode { exprs, - table_id: self.logical.table_id.table_id(), - table_version_id: self.logical.table_version_id, - returning: self.logical.returning, + table_id: self.core.table_id.table_id(), + table_version_id: self.core.table_version_id, + returning: self.core.returning, update_column_indices, }) } @@ -104,8 +100,8 @@ impl ExprRewritable for BatchUpdate { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.schema().clone()).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.schema().clone()).into() } } diff --git a/src/frontend/src/optimizer/plan_node/batch_values.rs b/src/frontend/src/optimizer/plan_node/batch_values.rs index 5f4e2308493a9..9348cddba7422 100644 --- a/src/frontend/src/optimizer/plan_node/batch_values.rs +++ b/src/frontend/src/optimizer/plan_node/batch_values.rs @@ -18,6 +18,7 @@ 
use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::values_node::ExprTuple; use risingwave_pb::batch_plan::ValuesNode; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ ExprRewritable, LogicalValues, PlanBase, PlanRef, PlanTreeNodeLeaf, ToBatchPb, @@ -42,7 +43,7 @@ impl BatchValues { } pub fn with_dist(logical: LogicalValues, dist: Distribution) -> Self { - let ctx = logical.base.ctx.clone(); + let ctx = logical.base.ctx().clone(); let base = PlanBase::new_batch(ctx, logical.schema().clone(), dist, Order::any()); BatchValues { base, logical } } diff --git a/src/frontend/src/optimizer/plan_node/derive.rs b/src/frontend/src/optimizer/plan_node/derive.rs index fe2bd3e2da325..92ea32c73592c 100644 --- a/src/frontend/src/optimizer/plan_node/derive.rs +++ b/src/frontend/src/optimizer/plan_node/derive.rs @@ -82,7 +82,12 @@ pub(crate) fn derive_pk( columns: &[ColumnCatalog], ) -> (Vec, Vec) { // Note(congyi): avoid pk duplication - let stream_key = input.logical_pk().iter().copied().unique().collect_vec(); + let stream_key = input + .expect_stream_key() + .iter() + .copied() + .unique() + .collect_vec(); let schema = input.schema(); // Assert the uniqueness of column names and IDs, including hidden columns. 
diff --git a/src/frontend/src/optimizer/plan_node/generic/agg.rs b/src/frontend/src/optimizer/plan_node/generic/agg.rs index 63bde692af617..e0c7e339ee6a6 100644 --- a/src/frontend/src/optimizer/plan_node/generic/agg.rs +++ b/src/frontend/src/optimizer/plan_node/generic/agg.rs @@ -20,10 +20,11 @@ use itertools::{Either, Itertools}; use pretty_xmlish::{Pretty, StrAssocArr}; use risingwave_common::catalog::{Field, FieldDisplay, Schema}; use risingwave_common::types::DataType; +use risingwave_common::util::iter_util::ZipEqFast; use risingwave_common::util::sort_util::{ColumnOrder, ColumnOrderDisplay, OrderType}; -use risingwave_common::util::value_encoding; -use risingwave_expr::agg::{agg_kinds, AggKind}; -use risingwave_pb::data::PbDatum; +use risingwave_common::util::value_encoding::DatumToProtoExt; +use risingwave_expr::aggregate::{agg_kinds, AggKind}; +use risingwave_expr::sig::FUNCTION_REGISTRY; use risingwave_pb::expr::{PbAggCall, PbConstant}; use risingwave_pb::stream_plan::{agg_call_state, AggCallState as AggCallStatePb}; @@ -52,6 +53,7 @@ pub struct Agg { pub group_key: IndexSet, pub grouping_sets: Vec, pub input: PlanRef, + pub enable_two_phase: bool, } impl Agg { @@ -88,8 +90,8 @@ impl Agg { self.ctx().session_ctx().config().get_force_two_phase_agg() } - fn two_phase_agg_enabled(&self) -> bool { - self.ctx().session_ctx().config().get_enable_two_phase_agg() + pub fn two_phase_agg_enabled(&self) -> bool { + self.enable_two_phase } pub(crate) fn can_two_phase_agg(&self) -> bool { @@ -136,26 +138,28 @@ impl Agg { } pub fn new(agg_calls: Vec, group_key: IndexSet, input: PlanRef) -> Self { + let enable_two_phase = input + .ctx() + .session_ctx() + .config() + .get_enable_two_phase_agg(); Self { agg_calls, group_key, input, grouping_sets: vec![], + enable_two_phase, } } - pub fn new_with_grouping_sets( - agg_calls: Vec, - group_key: IndexSet, - grouping_sets: Vec, - input: PlanRef, - ) -> Self { - Self { - agg_calls, - group_key, - grouping_sets, - input, - } + 
pub fn with_grouping_sets(mut self, grouping_sets: Vec) -> Self { + self.grouping_sets = grouping_sets; + self + } + + pub fn with_enable_two_phase(mut self, enable_two_phase: bool) -> Self { + self.enable_two_phase = enable_two_phase; + self } } @@ -190,7 +194,7 @@ impl GenericPlanNode for Agg { Schema { fields } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { Some((0..self.group_key.len()).collect()) } @@ -270,7 +274,7 @@ impl Agg { HashMap, ) { ( - self.infer_result_table(me, vnode_col_idx, window_col_idx), + self.infer_intermediate_state_table(me, vnode_col_idx, window_col_idx), self.infer_stream_agg_state(me, vnode_col_idx, window_col_idx), self.infer_distinct_dedup_tables(me, vnode_col_idx, window_col_idx), ) @@ -339,7 +343,7 @@ impl Agg { window_col_idx: Option, ) -> Vec { let in_fields = self.input.schema().fields().to_vec(); - let in_pks = self.input.logical_pk().to_vec(); + let in_pks = self.input.stream_key().unwrap().to_vec(); let in_append_only = self.input.append_only(); let in_dist_key = self.input.distribution().dist_column_indices().to_vec(); @@ -408,7 +412,9 @@ impl Agg { | AggKind::FirstValue | AggKind::LastValue | AggKind::StringAgg - | AggKind::ArrayAgg => { + | AggKind::ArrayAgg + | AggKind::JsonbAgg + | AggKind::JsonbObjectAgg => { // columns with order requirement in state table let sort_keys = { match agg_call.agg_kind { @@ -421,7 +427,8 @@ impl Agg { AggKind::FirstValue | AggKind::LastValue | AggKind::StringAgg - | AggKind::ArrayAgg => { + | AggKind::ArrayAgg + | AggKind::JsonbAgg => { if agg_call.order_by.is_empty() { me.ctx().warn_to_user(format!( "{} without ORDER BY may produce non-deterministic result", @@ -443,6 +450,11 @@ impl Agg { }) .collect() } + AggKind::JsonbObjectAgg => agg_call + .order_by + .iter() + .map(|o| (o.order_type, o.column_index)) + .collect(), _ => unreachable!(), } }; @@ -451,7 +463,11 @@ impl Agg { AggKind::FirstValue | AggKind::LastValue | AggKind::StringAgg - | AggKind::ArrayAgg => 
agg_call.inputs.iter().map(|i| i.index).collect(), + | AggKind::ArrayAgg + | AggKind::JsonbAgg + | AggKind::JsonbObjectAgg => { + agg_call.inputs.iter().map(|i| i.index).collect() + } _ => vec![], }; let state = gen_materialized_input_state(sort_keys, include_keys); @@ -467,13 +483,43 @@ impl Agg { .collect() } - pub fn infer_result_table( + /// table schema: + /// group key | state for AGG1 | state for AGG2 | ... + pub fn infer_intermediate_state_table( &self, me: &impl GenericPlanRef, vnode_col_idx: Option, window_col_idx: Option, ) -> TableCatalog { - let out_fields = me.schema().fields(); + let mut out_fields = me.schema().fields().to_vec(); + + // rewrite data types in fields + let in_append_only = self.input.append_only(); + for (agg_call, field) in self + .agg_calls + .iter() + .zip_eq_fast(&mut out_fields[self.group_key.len()..]) + { + let sig = FUNCTION_REGISTRY + .get_aggregate( + agg_call.agg_kind, + &agg_call + .inputs + .iter() + .map(|input| input.data_type.clone()) + .collect_vec(), + &agg_call.return_type, + in_append_only, + ) + .expect("agg not found"); + if !in_append_only && sig.append_only { + // we use materialized input state for non-retractable aggregate function. + // for backward compatibility, the state type is same as the return type. + // its values in the intermediate state table are always null. 
+ } else if let Some(state_type) = &sig.state_type { + field.data_type = state_type.clone(); + } + } let in_dist_key = self.input.distribution().dist_column_indices().to_vec(); let n_group_key_cols = self.group_key.len(); @@ -551,12 +597,13 @@ impl Agg { .collect() } - pub fn decompose(self) -> (Vec, IndexSet, Vec, PlanRef) { + pub fn decompose(self) -> (Vec, IndexSet, Vec, PlanRef, bool) { ( self.agg_calls, self.group_key, self.grouping_sets, self.input, + self.enable_two_phase, ) } @@ -679,9 +726,7 @@ impl PlanAggCall { .direct_args .iter() .map(|x| PbConstant { - datum: Some(PbDatum { - body: value_encoding::serialize_datum(x.get_data()), - }), + datum: Some(x.get_data().to_protobuf()), r#type: Some(x.return_type().to_protobuf()), }) .collect(), diff --git a/src/frontend/src/optimizer/plan_node/generic/dedup.rs b/src/frontend/src/optimizer/plan_node/generic/dedup.rs index 89bfe14b2e9d3..bcec1b41ff7da 100644 --- a/src/frontend/src/optimizer/plan_node/generic/dedup.rs +++ b/src/frontend/src/optimizer/plan_node/generic/dedup.rs @@ -50,7 +50,7 @@ impl GenericPlanNode for Dedup { self.input.schema().clone() } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { Some(self.dedup_cols.clone()) } diff --git a/src/frontend/src/optimizer/plan_node/generic/delete.rs b/src/frontend/src/optimizer/plan_node/generic/delete.rs index 5d178b654acec..26952dd1c4031 100644 --- a/src/frontend/src/optimizer/plan_node/generic/delete.rs +++ b/src/frontend/src/optimizer/plan_node/generic/delete.rs @@ -60,9 +60,9 @@ impl GenericPlanNode for Delete { } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { if self.returning { - Some(self.input.logical_pk().to_vec()) + Some(self.input.stream_key()?.to_vec()) } else { Some(vec![]) } diff --git a/src/frontend/src/optimizer/plan_node/generic/dynamic_filter.rs b/src/frontend/src/optimizer/plan_node/generic/dynamic_filter.rs index aa8afc1779be8..85ffd922c43e7 100644 --- 
a/src/frontend/src/optimizer/plan_node/generic/dynamic_filter.rs +++ b/src/frontend/src/optimizer/plan_node/generic/dynamic_filter.rs @@ -58,8 +58,8 @@ impl GenericPlanNode for DynamicFilter { self.left.schema().clone() } - fn logical_pk(&self) -> Option> { - Some(self.left.logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.left.stream_key()?.to_vec()) } fn ctx(&self) -> OptimizerContextRef { @@ -151,7 +151,7 @@ pub fn infer_left_internal_table_catalog( let mut pk_indices = vec![left_key_index]; let read_prefix_len_hint = pk_indices.len(); - for i in me.logical_pk() { + for i in me.stream_key().unwrap() { if *i != left_key_index { pk_indices.push(*i); } diff --git a/src/frontend/src/optimizer/plan_node/generic/except.rs b/src/frontend/src/optimizer/plan_node/generic/except.rs index 3721db69eefb8..a49802e8d9155 100644 --- a/src/frontend/src/optimizer/plan_node/generic/except.rs +++ b/src/frontend/src/optimizer/plan_node/generic/except.rs @@ -33,8 +33,8 @@ impl GenericPlanNode for Except { self.inputs[0].schema().clone() } - fn logical_pk(&self) -> Option> { - Some(self.inputs[0].logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.inputs[0].stream_key()?.to_vec()) } fn ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/expand.rs b/src/frontend/src/optimizer/plan_node/generic/expand.rs index d78bd4c112028..ba7c52aa814ef 100644 --- a/src/frontend/src/optimizer/plan_node/generic/expand.rs +++ b/src/frontend/src/optimizer/plan_node/generic/expand.rs @@ -57,11 +57,11 @@ impl GenericPlanNode for Expand { Schema::new(fields) } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { let input_schema_len = self.input.schema().len(); let mut pk_indices = self .input - .logical_pk() + .stream_key()? 
.iter() .map(|&pk| pk + input_schema_len) .collect_vec(); diff --git a/src/frontend/src/optimizer/plan_node/generic/filter.rs b/src/frontend/src/optimizer/plan_node/generic/filter.rs index 2f6542cb19a9a..5b09e504121c0 100644 --- a/src/frontend/src/optimizer/plan_node/generic/filter.rs +++ b/src/frontend/src/optimizer/plan_node/generic/filter.rs @@ -53,8 +53,8 @@ impl GenericPlanNode for Filter { self.input.schema().clone() } - fn logical_pk(&self) -> Option> { - Some(self.input.logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.input.stream_key()?.to_vec()) } fn ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/hop_window.rs b/src/frontend/src/optimizer/plan_node/generic/hop_window.rs index 131e7c6f1455e..9bd0dec4b70cc 100644 --- a/src/frontend/src/optimizer/plan_node/generic/hop_window.rs +++ b/src/frontend/src/optimizer/plan_node/generic/hop_window.rs @@ -63,7 +63,7 @@ impl GenericPlanNode for HopWindow { .collect() } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { let window_start_index = self .output_indices .iter() @@ -77,7 +77,7 @@ impl GenericPlanNode for HopWindow { } else { let mut pk = self .input - .logical_pk() + .stream_key()? 
.iter() .filter_map(|&pk_idx| self.output_indices.iter().position(|&idx| idx == pk_idx)) .collect_vec(); diff --git a/src/frontend/src/optimizer/plan_node/generic/insert.rs b/src/frontend/src/optimizer/plan_node/generic/insert.rs index c5bfeb725ff83..727e0296e0c83 100644 --- a/src/frontend/src/optimizer/plan_node/generic/insert.rs +++ b/src/frontend/src/optimizer/plan_node/generic/insert.rs @@ -61,7 +61,7 @@ impl GenericPlanNode for Insert { } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { None } diff --git a/src/frontend/src/optimizer/plan_node/generic/intersect.rs b/src/frontend/src/optimizer/plan_node/generic/intersect.rs index 3b781eeb37fbd..c0db320dbf537 100644 --- a/src/frontend/src/optimizer/plan_node/generic/intersect.rs +++ b/src/frontend/src/optimizer/plan_node/generic/intersect.rs @@ -32,8 +32,8 @@ impl GenericPlanNode for Intersect { self.inputs[0].schema().clone() } - fn logical_pk(&self) -> Option> { - Some(self.inputs[0].logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.inputs[0].stream_key()?.to_vec()) } fn ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/join.rs b/src/frontend/src/optimizer/plan_node/generic/join.rs index 47c6b66286d98..87c03cc14c8c9 100644 --- a/src/frontend/src/optimizer/plan_node/generic/join.rs +++ b/src/frontend/src/optimizer/plan_node/generic/join.rs @@ -12,15 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use itertools::EitherOrBoth; -use risingwave_common::catalog::Schema; +use itertools::{EitherOrBoth, Itertools}; +use risingwave_common::catalog::{Field, Schema}; +use risingwave_common::types::DataType; +use risingwave_common::util::sort_util::OrderType; use risingwave_pb::plan_common::JoinType; use super::{EqJoinPredicate, GenericPlanNode, GenericPlanRef}; use crate::expr::ExprRewriter; use crate::optimizer::optimizer_context::OptimizerContextRef; +use crate::optimizer::plan_node::stream; +use crate::optimizer::plan_node::utils::TableCatalogBuilder; use crate::optimizer::property::FunctionalDependencySet; use crate::utils::{ColIndexMapping, ColIndexMappingRewriteExt, Condition}; +use crate::TableCatalog; /// [`Join`] combines two relations according to some condition. /// @@ -65,6 +70,75 @@ impl Join { } } +impl Join { + /// Return stream hash join internal table catalog and degree table catalog. + pub fn infer_internal_and_degree_table_catalog( + input: &PlanRef, + join_key_indices: Vec, + dk_indices_in_jk: Vec, + ) -> (TableCatalog, TableCatalog, Vec) { + let schema = input.schema(); + + let internal_table_dist_keys = dk_indices_in_jk + .iter() + .map(|idx| join_key_indices[*idx]) + .collect_vec(); + + let degree_table_dist_keys = dk_indices_in_jk.clone(); + + // The pk of hash join internal and degree table should be join_key + input_pk. + let join_key_len = join_key_indices.len(); + let mut pk_indices = join_key_indices; + + // dedup the pk in dist key.. 
+ let mut deduped_input_pk_indices = vec![]; + for input_pk_idx in input.stream_key().unwrap() { + if !pk_indices.contains(input_pk_idx) + && !deduped_input_pk_indices.contains(input_pk_idx) + { + deduped_input_pk_indices.push(*input_pk_idx); + } + } + + pk_indices.extend(deduped_input_pk_indices.clone()); + + // Build internal table + let mut internal_table_catalog_builder = + TableCatalogBuilder::new(input.ctx().with_options().internal_table_subset()); + let internal_columns_fields = schema.fields().to_vec(); + + internal_columns_fields.iter().for_each(|field| { + internal_table_catalog_builder.add_column(field); + }); + pk_indices.iter().for_each(|idx| { + internal_table_catalog_builder.add_order_column(*idx, OrderType::ascending()) + }); + + // Build degree table. + let mut degree_table_catalog_builder = + TableCatalogBuilder::new(input.ctx().with_options().internal_table_subset()); + + let degree_column_field = Field::with_name(DataType::Int64, "_degree"); + + pk_indices.iter().enumerate().for_each(|(order_idx, idx)| { + degree_table_catalog_builder.add_column(&internal_columns_fields[*idx]); + degree_table_catalog_builder.add_order_column(order_idx, OrderType::ascending()); + }); + degree_table_catalog_builder.add_column(°ree_column_field); + degree_table_catalog_builder + .set_value_indices(vec![degree_table_catalog_builder.columns().len() - 1]); + + internal_table_catalog_builder.set_dist_key_in_pk(dk_indices_in_jk.clone()); + degree_table_catalog_builder.set_dist_key_in_pk(dk_indices_in_jk); + + ( + internal_table_catalog_builder.build(internal_table_dist_keys, join_key_len), + degree_table_catalog_builder.build(degree_table_dist_keys, join_key_len), + deduped_input_pk_indices, + ) + } +} + impl GenericPlanNode for Join { fn schema(&self) -> Schema { let left_schema = self.left.schema(); @@ -90,75 +164,89 @@ impl GenericPlanNode for Join { Schema { fields } } - fn logical_pk(&self) -> Option> { - let _left_len = self.left.schema().len(); - let _right_len = 
self.right.schema().len(); - let left_pk = self.left.logical_pk(); - let right_pk = self.right.logical_pk(); + fn stream_key(&self) -> Option> { + let left_len = self.left.schema().len(); + let right_len = self.right.schema().len(); + let eq_predicate = EqJoinPredicate::create(left_len, right_len, self.on.clone()); + + let left_pk = self.left.stream_key()?; + let right_pk = self.right.stream_key()?; let l2i = self.l2i_col_mapping(); let r2i = self.r2i_col_mapping(); let full_out_col_num = self.internal_column_num(); let i2o = ColIndexMapping::with_remaining_columns(&self.output_indices, full_out_col_num); - let pk_indices = left_pk + let mut pk_indices = left_pk .iter() .map(|index| l2i.try_map(*index)) .chain(right_pk.iter().map(|index| r2i.try_map(*index))) .flatten() .map(|index| i2o.try_map(index)) - .collect::>>(); + .collect::>>()?; // NOTE(st1page): add join keys in the pk_indices a work around before we really have stream // key. - pk_indices.and_then(|mut pk_indices| { - let left_len = self.left.schema().len(); - let right_len = self.right.schema().len(); - let eq_predicate = EqJoinPredicate::create(left_len, right_len, self.on.clone()); - - let l2i = self.l2i_col_mapping(); - let r2i = self.r2i_col_mapping(); - let full_out_col_num = self.internal_column_num(); - let i2o = - ColIndexMapping::with_remaining_columns(&self.output_indices, full_out_col_num); - - let either_or_both = self.add_which_join_key_to_pk(); - - for (lk, rk) in eq_predicate.eq_indexes() { - match either_or_both { - EitherOrBoth::Left(_) => { - if let Some(lk) = l2i.try_map(lk) { - let out_k = i2o.try_map(lk)?; - if !pk_indices.contains(&out_k) { - pk_indices.push(out_k); - } + let l2i = self.l2i_col_mapping(); + let r2i = self.r2i_col_mapping(); + let full_out_col_num = self.internal_column_num(); + let i2o = ColIndexMapping::with_remaining_columns(&self.output_indices, full_out_col_num); + + let either_or_both = self.add_which_join_key_to_pk(); + + for (lk, rk) in 
eq_predicate.eq_indexes() { + match either_or_both { + EitherOrBoth::Left(_) => { + // Remove right-side join-key column it from pk_indices. + // This may happen when right-side join-key is included in right-side PK. + // e.g. select a, b where a.bid = b.id + // Here the pk_indices should be [a.id, a.bid] instead of [a.id, b.id, a.bid], + // because b.id = a.bid, so either of them would be enough. + if let Some(rk) = r2i.try_map(rk) { + if let Some(out_k) = i2o.try_map(rk) { + pk_indices.retain(|&x| x != out_k); } } - EitherOrBoth::Right(_) => { - if let Some(rk) = r2i.try_map(rk) { - let out_k = i2o.try_map(rk)?; - if !pk_indices.contains(&out_k) { - pk_indices.push(out_k); - } + // Add left-side join-key column in pk_indices + if let Some(lk) = l2i.try_map(lk) { + let out_k = i2o.try_map(lk)?; + if !pk_indices.contains(&out_k) { + pk_indices.push(out_k); } } - EitherOrBoth::Both(_, _) => { - if let Some(lk) = l2i.try_map(lk) { - let out_k = i2o.try_map(lk)?; - if !pk_indices.contains(&out_k) { - pk_indices.push(out_k); - } + } + EitherOrBoth::Right(_) => { + // Remove left-side join-key column it from pk_indices + // See the example above + if let Some(lk) = l2i.try_map(lk) { + if let Some(out_k) = i2o.try_map(lk) { + pk_indices.retain(|&x| x != out_k); } - if let Some(rk) = r2i.try_map(rk) { - let out_k = i2o.try_map(rk)?; - if !pk_indices.contains(&out_k) { - pk_indices.push(out_k); - } + } + // Add right-side join-key column in pk_indices + if let Some(rk) = r2i.try_map(rk) { + let out_k = i2o.try_map(rk)?; + if !pk_indices.contains(&out_k) { + pk_indices.push(out_k); } } - }; - } - Some(pk_indices) - }) + } + EitherOrBoth::Both(_, _) => { + if let Some(lk) = l2i.try_map(lk) { + let out_k = i2o.try_map(lk)?; + if !pk_indices.contains(&out_k) { + pk_indices.push(out_k); + } + } + if let Some(rk) = r2i.try_map(rk) { + let out_k = i2o.try_map(rk)?; + if !pk_indices.contains(&out_k) { + pk_indices.push(out_k); + } + } + } + }; + } + Some(pk_indices) } fn 
ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/limit.rs b/src/frontend/src/optimizer/plan_node/generic/limit.rs index 060fdf47cbda4..b4ac1ef7821e2 100644 --- a/src/frontend/src/optimizer/plan_node/generic/limit.rs +++ b/src/frontend/src/optimizer/plan_node/generic/limit.rs @@ -42,8 +42,8 @@ impl GenericPlanNode for Limit { self.input.functional_dependency().clone() } - fn logical_pk(&self) -> Option> { - Some(self.input.logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.input.stream_key()?.to_vec()) } } impl Limit { diff --git a/src/frontend/src/optimizer/plan_node/generic/mod.rs b/src/frontend/src/optimizer/plan_node/generic/mod.rs index 032eaa40fcda2..aec59c90bcc4e 100644 --- a/src/frontend/src/optimizer/plan_node/generic/mod.rs +++ b/src/frontend/src/optimizer/plan_node/generic/mod.rs @@ -18,9 +18,9 @@ use std::hash::Hash; use pretty_xmlish::XmlNode; use risingwave_common::catalog::Schema; -use super::{stream, EqJoinPredicate}; +use super::{stream, EqJoinPredicate, PlanNodeId}; use crate::optimizer::optimizer_context::OptimizerContextRef; -use crate::optimizer::property::FunctionalDependencySet; +use crate::optimizer::property::{Distribution, FunctionalDependencySet}; pub mod dynamic_filter; pub use dynamic_filter::*; @@ -85,23 +85,20 @@ macro_rules! 
impl_distill_unit_from_fields { pub(super) use impl_distill_unit_from_fields; pub trait GenericPlanRef: Eq + Hash { + fn id(&self) -> PlanNodeId; fn schema(&self) -> &Schema; - fn logical_pk(&self) -> &[usize]; + fn stream_key(&self) -> Option<&[usize]>; fn functional_dependency(&self) -> &FunctionalDependencySet; fn ctx(&self) -> OptimizerContextRef; } +pub trait PhysicalPlanRef: GenericPlanRef { + fn distribution(&self) -> &Distribution; +} + pub trait GenericPlanNode { - /// return (schema, `logical_pk`, fds) - fn logical_properties(&self) -> (Schema, Option>, FunctionalDependencySet) { - ( - self.schema(), - self.logical_pk(), - self.functional_dependency(), - ) - } fn functional_dependency(&self) -> FunctionalDependencySet; fn schema(&self) -> Schema; - fn logical_pk(&self) -> Option>; + fn stream_key(&self) -> Option>; fn ctx(&self) -> OptimizerContextRef; } diff --git a/src/frontend/src/optimizer/plan_node/generic/over_window.rs b/src/frontend/src/optimizer/plan_node/generic/over_window.rs index 5f7b0705fba26..96e60184fbcca 100644 --- a/src/frontend/src/optimizer/plan_node/generic/over_window.rs +++ b/src/frontend/src/optimizer/plan_node/generic/over_window.rs @@ -18,7 +18,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_common::util::sort_util::{ColumnOrder, ColumnOrderDisplay}; -use risingwave_expr::function::window::{Frame, WindowFuncKind}; +use risingwave_expr::window_function::{Frame, WindowFuncKind}; use risingwave_pb::expr::PbWindowFunction; use super::{DistillUnit, GenericPlanNode, GenericPlanRef}; @@ -218,8 +218,8 @@ impl GenericPlanNode for OverWindow { schema } - fn logical_pk(&self) -> Option> { - let mut output_pk = self.input.logical_pk().to_vec(); + fn stream_key(&self) -> Option> { + let mut output_pk = self.input.stream_key()?.to_vec(); for part_key_idx in self .window_functions .iter() diff --git 
a/src/frontend/src/optimizer/plan_node/generic/project.rs b/src/frontend/src/optimizer/plan_node/generic/project.rs index 1d35332f0e709..d8b6988af4391 100644 --- a/src/frontend/src/optimizer/plan_node/generic/project.rs +++ b/src/frontend/src/optimizer/plan_node/generic/project.rs @@ -97,10 +97,10 @@ impl GenericPlanNode for Project { Schema { fields } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { let i2o = self.i2o_col_mapping(); self.input - .logical_pk() + .stream_key()? .iter() .map(|pk_col| i2o.try_map(*pk_col)) .collect::>>() diff --git a/src/frontend/src/optimizer/plan_node/generic/project_set.rs b/src/frontend/src/optimizer/plan_node/generic/project_set.rs index e159927f7bfda..fef26d1b32993 100644 --- a/src/frontend/src/optimizer/plan_node/generic/project_set.rs +++ b/src/frontend/src/optimizer/plan_node/generic/project_set.rs @@ -88,11 +88,11 @@ impl GenericPlanNode for ProjectSet { Schema { fields } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { let i2o = self.i2o_col_mapping(); let mut pk = self .input - .logical_pk() + .stream_key()? 
.iter() .map(|pk_col| i2o.try_map(*pk_col)) .collect::>>() diff --git a/src/frontend/src/optimizer/plan_node/generic/scan.rs b/src/frontend/src/optimizer/plan_node/generic/scan.rs index 526a677e279b5..b7de99d11096b 100644 --- a/src/frontend/src/optimizer/plan_node/generic/scan.rs +++ b/src/frontend/src/optimizer/plan_node/generic/scan.rs @@ -307,7 +307,7 @@ impl GenericPlanNode for Scan { Schema { fields } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { let id_to_op_idx = Self::get_id_to_op_idx_mapping(&self.output_col_idx, &self.table_desc); self.table_desc .stream_key @@ -325,7 +325,7 @@ impl GenericPlanNode for Scan { } fn functional_dependency(&self) -> FunctionalDependencySet { - let pk_indices = self.logical_pk(); + let pk_indices = self.stream_key(); let col_num = self.output_col_idx.len(); match &pk_indices { Some(pk_indices) => FunctionalDependencySet::with_key(col_num, pk_indices), diff --git a/src/frontend/src/optimizer/plan_node/generic/share.rs b/src/frontend/src/optimizer/plan_node/generic/share.rs index c22a46357fa66..838a02c07f2e1 100644 --- a/src/frontend/src/optimizer/plan_node/generic/share.rs +++ b/src/frontend/src/optimizer/plan_node/generic/share.rs @@ -43,8 +43,8 @@ impl GenericPlanNode for Share { self.input.borrow().schema().clone() } - fn logical_pk(&self) -> Option> { - Some(self.input.borrow().logical_pk().to_vec()) + fn stream_key(&self) -> Option> { + Some(self.input.borrow().stream_key()?.to_vec()) } fn ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/source.rs b/src/frontend/src/optimizer/plan_node/generic/source.rs index 4a4f092110dee..4d508cc37894e 100644 --- a/src/frontend/src/optimizer/plan_node/generic/source.rs +++ b/src/frontend/src/optimizer/plan_node/generic/source.rs @@ -60,7 +60,7 @@ impl GenericPlanNode for Source { Schema { fields } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { self.row_id_index.map(|idx| vec![idx]) } @@ 
-69,7 +69,7 @@ impl GenericPlanNode for Source { } fn functional_dependency(&self) -> FunctionalDependencySet { - let pk_indices = self.logical_pk(); + let pk_indices = self.stream_key(); match pk_indices { Some(pk_indices) => { FunctionalDependencySet::with_key(self.column_catalog.len(), &pk_indices) diff --git a/src/frontend/src/optimizer/plan_node/generic/top_n.rs b/src/frontend/src/optimizer/plan_node/generic/top_n.rs index 990e37219d19b..dce6dfded0ac8 100644 --- a/src/frontend/src/optimizer/plan_node/generic/top_n.rs +++ b/src/frontend/src/optimizer/plan_node/generic/top_n.rs @@ -42,7 +42,7 @@ impl TopN { &self, schema: &Schema, ctx: OptimizerContextRef, - stream_key: &[usize], + input_stream_key: &[usize], vnode_col_idx: Option, ) -> TableCatalog { let columns_fields = schema.fields().to_vec(); @@ -64,17 +64,16 @@ impl TopN { internal_table_catalog_builder.add_order_column(idx, OrderType::ascending()); order_cols.insert(idx); }); - let read_prefix_len_hint = internal_table_catalog_builder.get_current_pk_len(); + column_orders.iter().for_each(|order| { internal_table_catalog_builder.add_order_column(order.column_index, order.order_type); order_cols.insert(order.column_index); }); - stream_key.iter().for_each(|idx| { - if !order_cols.contains(idx) { + input_stream_key.iter().for_each(|idx| { + if order_cols.insert(*idx) { internal_table_catalog_builder.add_order_column(*idx, OrderType::ascending()); - order_cols.insert(*idx); } }); if let Some(vnode_col_idx) = vnode_col_idx { @@ -170,20 +169,20 @@ impl GenericPlanNode for TopN { self.input.schema().clone() } - fn logical_pk(&self) -> Option> { - // We can use the group key as the stream key when there is at most one record for each - // value of the group key. 
- if self.limit_attr.max_one_row() { - Some(self.group_key.clone()) - } else { - let mut pk = self.input.logical_pk().to_vec(); - for i in &self.group_key { - if !pk.contains(i) { - pk.push(*i); + fn stream_key(&self) -> Option> { + let input_stream_key = self.input.stream_key()?; + let mut stream_key = self.group_key.clone(); + if !self.limit_attr.max_one_row() { + for i in input_stream_key { + if !stream_key.contains(i) { + stream_key.push(*i); } } - Some(pk) } + // else: We can use the group key as the stream key when there is at most one record for each + // value of the group key. + + Some(stream_key) } fn ctx(&self) -> OptimizerContextRef { diff --git a/src/frontend/src/optimizer/plan_node/generic/union.rs b/src/frontend/src/optimizer/plan_node/generic/union.rs index 91f10eac749f0..3e6a5b9b9bab6 100644 --- a/src/frontend/src/optimizer/plan_node/generic/union.rs +++ b/src/frontend/src/optimizer/plan_node/generic/union.rs @@ -33,14 +33,20 @@ pub struct Union { impl GenericPlanNode for Union { fn schema(&self) -> Schema { - self.inputs[0].schema().clone() + let mut schema = self.inputs[0].schema().clone(); + if let Some(source_col) = self.source_col { + schema.fields[source_col].name = "$src".to_string(); + schema + } else { + schema + } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { // Union all its inputs pks + source_col if exists let mut pk_indices = vec![]; for input in &self.inputs { - for pk in input.logical_pk() { + for pk in input.stream_key()? 
{ if !pk_indices.contains(pk) { pk_indices.push(*pk); } diff --git a/src/frontend/src/optimizer/plan_node/generic/update.rs b/src/frontend/src/optimizer/plan_node/generic/update.rs index 0943d770daaef..16d73afe62003 100644 --- a/src/frontend/src/optimizer/plan_node/generic/update.rs +++ b/src/frontend/src/optimizer/plan_node/generic/update.rs @@ -62,9 +62,9 @@ impl GenericPlanNode for Update { } } - fn logical_pk(&self) -> Option> { + fn stream_key(&self) -> Option> { if self.returning { - Some(self.input.logical_pk().to_vec()) + Some(self.input.stream_key()?.to_vec()) } else { Some(vec![]) } diff --git a/src/frontend/src/optimizer/plan_node/logical_agg.rs b/src/frontend/src/optimizer/plan_node/logical_agg.rs index 4387c8f4f89f3..0ad9b828ead4b 100644 --- a/src/frontend/src/optimizer/plan_node/logical_agg.rs +++ b/src/frontend/src/optimizer/plan_node/logical_agg.rs @@ -17,7 +17,7 @@ use itertools::Itertools; use risingwave_common::error::{ErrorCode, Result}; use risingwave_common::types::{DataType, Datum, ScalarImpl}; use risingwave_common::util::sort_util::ColumnOrder; -use risingwave_expr::agg::{agg_kinds, AggKind}; +use risingwave_expr::aggregate::{agg_kinds, AggKind}; use super::generic::{self, Agg, GenericPlanRef, PlanAggCall, ProjectBuilder}; use super::utils::impl_distill_by_unit; @@ -28,9 +28,9 @@ use super::{ }; use crate::expr::{ AggCall, Expr, ExprImpl, ExprRewriter, ExprType, FunctionCall, InputRef, Literal, OrderBy, + WindowFunction, }; use crate::optimizer::plan_node::generic::GenericPlanNode; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::{ gen_filter_and_pushdown, BatchSortAgg, ColumnPruningContext, LogicalDedup, LogicalProject, PredicatePushdownContext, RewriteStreamContext, ToStreamContext, @@ -57,9 +57,9 @@ impl LogicalAgg { /// Should only be used iff input is distributed. Input must be converted to stream form. 
fn gen_stateless_two_phase_streaming_agg_plan(&self, stream_input: PlanRef) -> Result { debug_assert!(self.group_key().is_empty()); - let mut logical = self.core.clone(); - logical.input = stream_input; - let local_agg = StreamStatelessSimpleAgg::new(logical); + let mut core = self.core.clone(); + core.input = stream_input; + let local_agg = StreamStatelessSimpleAgg::new(core); let exchange = RequiredDist::single().enforce_if_not_satisfies(local_agg.into(), &Order::any())?; let global_agg = new_stream_simple_agg(Agg::new( @@ -165,19 +165,19 @@ impl LogicalAgg { } fn gen_single_plan(&self, stream_input: PlanRef) -> Result { - let mut logical = self.core.clone(); + let mut core = self.core.clone(); let input = RequiredDist::single().enforce_if_not_satisfies(stream_input, &Order::any())?; - logical.input = input; - Ok(new_stream_simple_agg(logical).into()) + core.input = input; + Ok(new_stream_simple_agg(core).into()) } fn gen_shuffle_plan(&self, stream_input: PlanRef) -> Result { let input = RequiredDist::shard_by_key(stream_input.schema().len(), &self.group_key().to_vec()) .enforce_if_not_satisfies(stream_input, &Order::any())?; - let mut logical = self.core.clone(); - logical.input = input; - Ok(new_stream_hash_agg(logical, None).into()) + let mut core = self.core.clone(); + core.input = input; + Ok(new_stream_hash_agg(core, None).into()) } /// Generates distributed stream plan. @@ -222,8 +222,11 @@ impl LogicalAgg { // so it obeys consistent hash strategy via [`Distribution::HashShard`]. let stream_input = if *input_dist == Distribution::SomeShard && self.core.must_try_two_phase_agg() { - RequiredDist::shard_by_key(stream_input.schema().len(), stream_input.logical_pk()) - .enforce_if_not_satisfies(stream_input, &Order::any())? + RequiredDist::shard_by_key( + stream_input.schema().len(), + stream_input.expect_stream_key(), + ) + .enforce_if_not_satisfies(stream_input, &Order::any())? 
} else { stream_input }; @@ -361,13 +364,9 @@ impl LogicalAggBuilder { let logical_project = LogicalProject::with_core(self.input_proj_builder.build(input)); // This LogicalAgg focuses on calculating the aggregates and grouping. - Agg::new_with_grouping_sets( - self.agg_calls, - self.group_key, - self.grouping_sets, - logical_project.into(), - ) - .into() + Agg::new(self.agg_calls, self.group_key, logical_project.into()) + .with_grouping_sets(self.grouping_sets) + .into() } fn rewrite_with_error(&mut self, expr: ExprImpl) -> Result { @@ -735,6 +734,33 @@ impl ExprRewriter for LogicalAggBuilder { } } + /// When there is an `WindowFunction` (outside of agg call), it must refers to a group column. + /// Or all `InputRef`s appears in it must refer to a group column. + fn rewrite_window_function(&mut self, window_func: WindowFunction) -> ExprImpl { + let WindowFunction { + args, + partition_by, + order_by, + .. + } = window_func; + let args = args + .into_iter() + .map(|expr| self.rewrite_expr(expr)) + .collect(); + let partition_by = partition_by + .into_iter() + .map(|expr| self.rewrite_expr(expr)) + .collect(); + let order_by = order_by.rewrite_expr(self); + WindowFunction { + args, + partition_by, + order_by, + ..window_func + } + .into() + } + /// When there is an `InputRef` (outside of agg call), it must refers to a group column. 
fn rewrite_input_ref(&mut self, input_ref: InputRef) -> ExprImpl { let expr = input_ref.into(); @@ -831,7 +857,7 @@ impl LogicalAgg { &self.core.grouping_sets } - pub fn decompose(self) -> (Vec, IndexSet, Vec, PlanRef) { + pub fn decompose(self) -> (Vec, IndexSet, Vec, PlanRef, bool) { self.core.decompose() } @@ -870,8 +896,9 @@ impl LogicalAgg { .map(|set| set.indices().map(|key| input_col_change.map(key)).collect()) .collect(); - let new_agg = - Agg::new_with_grouping_sets(agg_calls, group_key.clone(), grouping_sets, input); + let new_agg = Agg::new(agg_calls, group_key.clone(), input) + .with_grouping_sets(grouping_sets) + .with_enable_two_phase(self.core().enable_two_phase); // group_key remapping might cause an output column change, since group key actually is a // `FixedBitSet`. @@ -896,13 +923,10 @@ impl PlanTreeNodeUnary for LogicalAgg { } fn clone_with_input(&self, input: PlanRef) -> Self { - Agg::new_with_grouping_sets( - self.agg_calls().to_vec(), - self.group_key().clone(), - self.grouping_sets().clone(), - input, - ) - .into() + Agg::new(self.agg_calls().to_vec(), self.group_key().clone(), input) + .with_grouping_sets(self.grouping_sets().clone()) + .with_enable_two_phase(self.core().enable_two_phase) + .into() } #[must_use] @@ -1101,13 +1125,13 @@ fn find_or_append_row_count(mut logical: Agg) -> (Agg, usize) (logical, row_count_idx) } -fn new_stream_simple_agg(logical: Agg) -> StreamSimpleAgg { - let (logical, row_count_idx) = find_or_append_row_count(logical); +fn new_stream_simple_agg(core: Agg) -> StreamSimpleAgg { + let (logical, row_count_idx) = find_or_append_row_count(core); StreamSimpleAgg::new(logical, row_count_idx) } -fn new_stream_hash_agg(logical: Agg, vnode_col_idx: Option) -> StreamHashAgg { - let (logical, row_count_idx) = find_or_append_row_count(logical); +fn new_stream_hash_agg(core: Agg, vnode_col_idx: Option) -> StreamHashAgg { + let (logical, row_count_idx) = find_or_append_row_count(core); StreamHashAgg::new(logical, 
vnode_col_idx, row_count_idx) } diff --git a/src/frontend/src/optimizer/plan_node/logical_apply.rs b/src/frontend/src/optimizer/plan_node/logical_apply.rs index 0ea21532458fe..b398ce7494f61 100644 --- a/src/frontend/src/optimizer/plan_node/logical_apply.rs +++ b/src/frontend/src/optimizer/plan_node/logical_apply.rs @@ -18,7 +18,9 @@ use risingwave_common::catalog::Schema; use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_pb::plan_common::JoinType; -use super::generic::{self, push_down_into_join, push_down_join_condition, GenericPlanNode}; +use super::generic::{ + self, push_down_into_join, push_down_join_condition, GenericPlanNode, GenericPlanRef, +}; use super::utils::{childless_record, Distill}; use super::{ ColPrunable, LogicalJoin, LogicalProject, PlanBase, PlanRef, PlanTreeNodeBinary, @@ -86,16 +88,13 @@ impl LogicalApply { let ctx = left.ctx(); let join_core = generic::Join::with_full_output(left, right, join_type, on); let schema = join_core.schema(); - let pk_indices = join_core.logical_pk(); - let (functional_dependency, pk_indices) = match pk_indices { - Some(pk_indices) => ( - FunctionalDependencySet::with_key(schema.len(), &pk_indices), - pk_indices, - ), - None => (FunctionalDependencySet::new(schema.len()), vec![]), + let stream_key = join_core.stream_key(); + let functional_dependency = match &stream_key { + Some(stream_key) => FunctionalDependencySet::with_key(schema.len(), stream_key), + None => FunctionalDependencySet::new(schema.len()), }; let (left, right, on, join_type, _output_indices) = join_core.decompose(); - let base = PlanBase::new_logical(ctx, schema, pk_indices, functional_dependency); + let base = PlanBase::new_logical(ctx, schema, stream_key, functional_dependency); LogicalApply { base, left, diff --git a/src/frontend/src/optimizer/plan_node/logical_dedup.rs b/src/frontend/src/optimizer/plan_node/logical_dedup.rs index f070d51847fee..dd46f9af9be1d 100644 --- 
a/src/frontend/src/optimizer/plan_node/logical_dedup.rs +++ b/src/frontend/src/optimizer/plan_node/logical_dedup.rs @@ -38,13 +38,8 @@ pub struct LogicalDedup { impl LogicalDedup { pub fn new(input: PlanRef, dedup_cols: Vec) -> Self { - let base = PlanBase::new_logical( - input.ctx(), - input.schema().clone(), - dedup_cols.clone(), - input.functional_dependency().clone(), - ); let core = generic::Dedup { input, dedup_cols }; + let base = PlanBase::new_logical_with_core(&core); LogicalDedup { base, core } } diff --git a/src/frontend/src/optimizer/plan_node/logical_expand.rs b/src/frontend/src/optimizer/plan_node/logical_expand.rs index b32374e6dc427..d1f3b666feef5 100644 --- a/src/frontend/src/optimizer/plan_node/logical_expand.rs +++ b/src/frontend/src/optimizer/plan_node/logical_expand.rs @@ -15,6 +15,7 @@ use itertools::Itertools; use risingwave_common::error::Result; +use super::generic::GenericPlanRef; use super::utils::impl_distill_by_unit; use super::{ gen_filter_and_pushdown, generic, BatchExpand, ColPrunable, ExprRewritable, PlanBase, PlanRef, @@ -192,7 +193,7 @@ mod tests { let mut values = LogicalValues::new(vec![], Schema { fields }, ctx); values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0], &[1, 2]); let column_subsets = vec![vec![0, 1], vec![2]]; diff --git a/src/frontend/src/optimizer/plan_node/logical_filter.rs b/src/frontend/src/optimizer/plan_node/logical_filter.rs index 72ee7d246b83d..a62b91aac5277 100644 --- a/src/frontend/src/optimizer/plan_node/logical_filter.rs +++ b/src/frontend/src/optimizer/plan_node/logical_filter.rs @@ -18,6 +18,7 @@ use risingwave_common::bail; use risingwave_common::error::Result; use risingwave_common::types::DataType; +use super::generic::GenericPlanRef; use super::utils::impl_distill_by_unit; use super::{ generic, ColPrunable, ExprRewritable, LogicalProject, PlanBase, PlanRef, PlanTreeNodeUnary, @@ -462,7 +463,7 @@ mod tests { // 3 --> 1, 2 values 
.base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[3], &[1, 2]); // v1 = 0 AND v2 = v3 let predicate = ExprImpl::FunctionCall(Box::new( diff --git a/src/frontend/src/optimizer/plan_node/logical_hop_window.rs b/src/frontend/src/optimizer/plan_node/logical_hop_window.rs index 3905daaaf9f85..da2ec2138c3d1 100644 --- a/src/frontend/src/optimizer/plan_node/logical_hop_window.rs +++ b/src/frontend/src/optimizer/plan_node/logical_hop_window.rs @@ -17,7 +17,7 @@ use itertools::Itertools; use risingwave_common::error::Result; use risingwave_common::types::Interval; -use super::generic::GenericPlanNode; +use super::generic::{GenericPlanNode, GenericPlanRef}; use super::utils::impl_distill_by_unit; use super::{ gen_filter_and_pushdown, generic, BatchHopWindow, ColPrunable, ExprRewritable, LogicalFilter, @@ -63,23 +63,12 @@ impl LogicalHopWindow { output_indices, }; - let _schema = core.schema(); - let _pk_indices = core.logical_pk(); let ctx = core.ctx(); - // NOTE(st1page): add join keys in the pk_indices a work around before we really have stream - // key. - // let pk_indices = match pk_indices { - // Some(pk_indices) if functional_dependency.is_key(&pk_indices) => { - // functional_dependency.minimize_key(&pk_indices) - // } - // _ => pk_indices.unwrap_or_default(), - // }; - let base = PlanBase::new_logical( ctx, core.schema(), - core.logical_pk().unwrap_or_default(), + core.stream_key(), core.functional_dependency(), ); @@ -90,6 +79,10 @@ impl LogicalHopWindow { self.core.into_parts() } + pub fn output_indices_are_trivial(&self) -> bool { + self.output_indices() == &(0..self.core.internal_column_num()).collect_vec() + } + /// used for binder and planner. The function will add a filter operator to ignore records with /// NULL time value. 
pub fn create( @@ -344,7 +337,7 @@ impl ToStream for LogicalHopWindow { let i2o = self.core.i2o_col_mapping(); output_indices.extend( input - .logical_pk() + .expect_stream_key() .iter() .cloned() .filter(|i| i2o.try_map(*i).is_none()), @@ -453,7 +446,7 @@ mod test { // 0, 1 --> 2 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0, 1], &[2]); let hop_window: PlanRef = LogicalHopWindow::new( values.into(), diff --git a/src/frontend/src/optimizer/plan_node/logical_insert.rs b/src/frontend/src/optimizer/plan_node/logical_insert.rs index 482c034302a38..e93b77d79c1f2 100644 --- a/src/frontend/src/optimizer/plan_node/logical_insert.rs +++ b/src/frontend/src/optimizer/plan_node/logical_insert.rs @@ -16,6 +16,7 @@ use pretty_xmlish::XmlNode; use risingwave_common::catalog::TableVersionId; use risingwave_common::error::Result; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ gen_filter_and_pushdown, generic, BatchInsert, ColPrunable, ExprRewritable, LogicalProject, @@ -90,7 +91,9 @@ impl_plan_tree_node_for_unary! 
{LogicalInsert} impl Distill for LogicalInsert { fn distill<'a>(&self) -> XmlNode<'a> { - let vec = self.core.fields_pretty(self.base.ctx.is_explain_verbose()); + let vec = self + .core + .fields_pretty(self.base.ctx().is_explain_verbose()); childless_record("LogicalInsert", vec) } } @@ -142,9 +145,9 @@ impl PredicatePushdown for LogicalInsert { impl ToBatch for LogicalInsert { fn to_batch(&self) -> Result { let new_input = self.input().to_batch()?; - let mut logical = self.core.clone(); - logical.input = new_input; - Ok(BatchInsert::new(logical).into()) + let mut core = self.core.clone(); + core.input = new_input; + Ok(BatchInsert::new(core).into()) } } diff --git a/src/frontend/src/optimizer/plan_node/logical_join.rs b/src/frontend/src/optimizer/plan_node/logical_join.rs index 640b31170c546..a586af2f0bf42 100644 --- a/src/frontend/src/optimizer/plan_node/logical_join.rs +++ b/src/frontend/src/optimizer/plan_node/logical_join.rs @@ -31,7 +31,6 @@ use super::{ }; use crate::expr::{CollectInputRef, Expr, ExprImpl, ExprRewriter, ExprType, InputRef}; use crate::optimizer::plan_node::generic::DynamicFilter; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::utils::IndicesDisplay; use crate::optimizer::plan_node::{ BatchHashJoin, BatchLookupJoin, BatchNestedLoopJoin, ColumnPruningContext, EqJoinPredicate, @@ -56,7 +55,7 @@ pub struct LogicalJoin { impl Distill for LogicalJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); vec.push(("type", Pretty::debug(&self.join_type()))); @@ -184,7 +183,7 @@ impl LogicalJoin { self.core.is_full_out() } - pub fn output_indices_is_trivial(&self) -> bool { + pub fn output_indices_are_trivial(&self) -> bool { self.output_indices() == &(0..self.internal_column_num()).collect_vec() } @@ -1297,7 +1296,8 @@ impl ToBatch for LogicalJoin { 
logical_join.left = logical_join.left.to_batch()?; logical_join.right = logical_join.right.to_batch()?; - let config = self.base.ctx.session_ctx().config(); + let ctx = self.base.ctx(); + let config = ctx.session_ctx().config(); if predicate.has_eq() { if !predicate.eq_keys_are_type_aligned() { @@ -1395,14 +1395,14 @@ impl ToStream for LogicalJoin { // Add missing pk indices to the logical join let mut left_to_add = left - .logical_pk() + .expect_stream_key() .iter() .cloned() .filter(|i| l2o.try_map(*i).is_none()) .collect_vec(); let mut right_to_add = right - .logical_pk() + .expect_stream_key() .iter() .filter(|&&i| r2o.try_map(i).is_none()) .map(|&i| i + left_len) @@ -1464,13 +1464,13 @@ impl ToStream for LogicalJoin { .composite(&join_with_pk.core.i2o_col_mapping()); let left_right_stream_keys = join_with_pk .left() - .logical_pk() + .expect_stream_key() .iter() .map(|i| l2o.map(*i)) .chain( join_with_pk .right() - .logical_pk() + .expect_stream_key() .iter() .map(|i| r2o.map(*i)), ) @@ -2001,7 +2001,7 @@ mod tests { // 0 --> 1 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0], &[1]); values }; @@ -2015,7 +2015,7 @@ mod tests { // 0 --> 1, 2 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0], &[1, 2]); values }; diff --git a/src/frontend/src/optimizer/plan_node/logical_multi_join.rs b/src/frontend/src/optimizer/plan_node/logical_multi_join.rs index b3d61cd495fb9..9b740abd7718e 100644 --- a/src/frontend/src/optimizer/plan_node/logical_multi_join.rs +++ b/src/frontend/src/optimizer/plan_node/logical_multi_join.rs @@ -244,19 +244,7 @@ impl LogicalMultiJoin { .collect_vec() }; - let pk_indices = { - let mut pk_indices = vec![]; - for (i, input_pk) in inputs.iter().map(|input| input.logical_pk()).enumerate() { - for input_pk_idx in input_pk { - pk_indices.push(inner_i2o_mappings[i].map(*input_pk_idx)); - } - } - pk_indices - 
.into_iter() - .map(|col_idx| inner2output.try_map(col_idx)) - .collect::>>() - .unwrap_or_default() - }; + let pk_indices = Self::derive_stream_key(&inputs, &inner_i2o_mappings, &inner2output); let functional_dependency = { let mut fd_set = FunctionalDependencySet::new(tot_col_num); let mut column_cnt: usize = 0; @@ -303,6 +291,25 @@ impl LogicalMultiJoin { } } + fn derive_stream_key( + inputs: &[PlanRef], + inner_i2o_mappings: &[ColIndexMapping], + inner2output: &ColIndexMapping, + ) -> Option> { + // TODO(st1page): add JOIN key + let mut pk_indices = vec![]; + for (i, input) in inputs.iter().enumerate() { + let input_stream_key = input.stream_key()?; + for input_pk_idx in input_stream_key { + pk_indices.push(inner_i2o_mappings[i].map(*input_pk_idx)); + } + } + pk_indices + .into_iter() + .map(|col_idx| inner2output.try_map(col_idx)) + .collect::>>() + } + /// Get a reference to the logical join's on. pub fn on(&self) -> &Condition { &self.on @@ -856,6 +863,7 @@ mod test { use super::*; use crate::expr::{FunctionCall, InputRef}; use crate::optimizer::optimizer_context::OptimizerContext; + use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::LogicalValues; use crate::optimizer::property::FunctionalDependency; #[tokio::test] @@ -876,7 +884,7 @@ mod test { // 0 --> 1 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0], &[1]); values }; @@ -890,7 +898,7 @@ mod test { // 0 --> 1, 2 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[0], &[1, 2]); values }; @@ -903,7 +911,7 @@ mod test { // {} --> 0 values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[], &[0]); values }; diff --git a/src/frontend/src/optimizer/plan_node/logical_now.rs b/src/frontend/src/optimizer/plan_node/logical_now.rs index 4a26ef6304541..1d720db15b71a 100644 --- 
a/src/frontend/src/optimizer/plan_node/logical_now.rs +++ b/src/frontend/src/optimizer/plan_node/logical_now.rs @@ -18,6 +18,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::error::Result; use risingwave_common::types::DataType; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ ColPrunable, ColumnPruningContext, ExprRewritable, LogicalFilter, PlanBase, PlanRef, @@ -41,14 +42,19 @@ impl LogicalNow { sub_fields: vec![], type_name: String::default(), }]); - let base = PlanBase::new_logical(ctx, schema, vec![], FunctionalDependencySet::default()); + let base = PlanBase::new_logical( + ctx, + schema, + Some(vec![]), + FunctionalDependencySet::default(), + ); Self { base } } } impl Distill for LogicalNow { fn distill<'a>(&self) -> XmlNode<'a> { - let vec = if self.base.ctx.is_explain_verbose() { + let vec = if self.base.ctx().is_explain_verbose() { vec![("output", column_names_pretty(self.schema()))] } else { vec![] diff --git a/src/frontend/src/optimizer/plan_node/logical_over_window.rs b/src/frontend/src/optimizer/plan_node/logical_over_window.rs index b2057f28e05fc..b1796ddc62752 100644 --- a/src/frontend/src/optimizer/plan_node/logical_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/logical_over_window.rs @@ -17,8 +17,8 @@ use itertools::Itertools; use risingwave_common::error::{ErrorCode, Result, RwError}; use risingwave_common::types::{DataType, Datum, ScalarImpl}; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_expr::agg::AggKind; -use risingwave_expr::function::window::{Frame, FrameBound, WindowFuncKind}; +use risingwave_expr::aggregate::AggKind; +use risingwave_expr::window_function::{Frame, FrameBound, WindowFuncKind}; use super::generic::{GenericPlanRef, OverWindow, PlanWindowFunction, ProjectBuilder}; use super::utils::impl_distill_by_unit; @@ -341,7 +341,9 @@ impl<'a> OverWindowProjectBuilder<'a> { } } -impl<'a> ExprVisitor<()> for 
OverWindowProjectBuilder<'a> { +impl<'a> ExprVisitor for OverWindowProjectBuilder<'a> { + type Result = (); + fn merge(_a: (), _b: ()) {} fn visit_window_function(&mut self, window_function: &WindowFunction) { @@ -832,9 +834,9 @@ impl ToStream for LogicalOverWindow { .enforce_if_not_satisfies(stream_input, &Order::any())?; let sort = StreamEowcSort::new(sort_input, order_key_index); - let mut logical = self.core.clone(); - logical.input = sort.into(); - Ok(StreamEowcOverWindow::new(logical).into()) + let mut core = self.core.clone(); + core.input = sort.into(); + Ok(StreamEowcOverWindow::new(core).into()) } else { // General (Emit-On-Update) case @@ -863,9 +865,9 @@ impl ToStream for LogicalOverWindow { let new_input = RequiredDist::shard_by_key(stream_input.schema().len(), &partition_key_indices) .enforce_if_not_satisfies(stream_input, &Order::any())?; - let mut logical = self.core.clone(); - logical.input = new_input; - Ok(StreamOverWindow::new(logical).into()) + let mut core = self.core.clone(); + core.input = new_input; + Ok(StreamOverWindow::new(core).into()) } } diff --git a/src/frontend/src/optimizer/plan_node/logical_project.rs b/src/frontend/src/optimizer/plan_node/logical_project.rs index 6e1f097e3c7d2..a96de7d91ecd5 100644 --- a/src/frontend/src/optimizer/plan_node/logical_project.rs +++ b/src/frontend/src/optimizer/plan_node/logical_project.rs @@ -264,7 +264,7 @@ impl ToStream for LogicalProject { let (proj, out_col_change) = self.rewrite_with_input(input.clone(), input_col_change); // Add missing columns of input_pk into the select list. - let input_pk = input.logical_pk(); + let input_pk = input.expect_stream_key(); let i2o = proj.i2o_col_mapping(); let col_need_to_add = input_pk .iter() @@ -284,7 +284,7 @@ impl ToStream for LogicalProject { // But the target size of `out_col_change` should be the same as the length of the new // schema. 
let (map, _) = out_col_change.into_parts(); - let out_col_change = ColIndexMapping::with_target_size(map, proj.base.schema.len()); + let out_col_change = ColIndexMapping::with_target_size(map, proj.base.schema().len()); Ok((proj.into(), out_col_change)) } } diff --git a/src/frontend/src/optimizer/plan_node/logical_project_set.rs b/src/frontend/src/optimizer/plan_node/logical_project_set.rs index 12100631ca666..4bf6b18cdabe3 100644 --- a/src/frontend/src/optimizer/plan_node/logical_project_set.rs +++ b/src/frontend/src/optimizer/plan_node/logical_project_set.rs @@ -363,7 +363,7 @@ impl ToStream for LogicalProjectSet { self.rewrite_with_input(input.clone(), input_col_change); // Add missing columns of input_pk into the select list. - let input_pk = input.logical_pk(); + let input_pk = input.expect_stream_key(); let i2o = self.core.i2o_col_mapping(); let col_need_to_add = input_pk .iter() @@ -427,7 +427,7 @@ mod test { let mut values = LogicalValues::new(vec![], Schema { fields }, ctx); values .base - .functional_dependency + .functional_dependency_mut() .add_functional_dependency_by_column_indices(&[1], &[2]); let project_set = LogicalProjectSet::new( values.into(), @@ -449,8 +449,9 @@ mod test { ); let fd_set: HashSet = project_set .base - .functional_dependency - .into_dependencies() + .functional_dependency() + .as_dependencies() + .clone() .into_iter() .collect(); let expected_fd_set: HashSet = diff --git a/src/frontend/src/optimizer/plan_node/logical_scan.rs b/src/frontend/src/optimizer/plan_node/logical_scan.rs index d7574abed7b29..07d2a6c7653e7 100644 --- a/src/frontend/src/optimizer/plan_node/logical_scan.rs +++ b/src/frontend/src/optimizer/plan_node/logical_scan.rs @@ -232,13 +232,20 @@ impl LogicalScan { return (self.core.clone(), Condition::true_cond(), None); } - let mut mapping = ColIndexMapping::with_target_size( - self.required_col_idx().iter().map(|i| Some(*i)).collect(), - self.table_desc().columns.len(), - ) - .inverse() - .expect("must be 
invertible"); - predicate = predicate.rewrite_expr(&mut mapping); + let mut inverse_mapping = { + let mapping = ColIndexMapping::with_target_size( + self.required_col_idx().iter().map(|i| Some(*i)).collect(), + self.table_desc().columns.len(), + ); + // Since `required_col_idx` mapping is not invertible, we need to inverse manually. + let mut inverse_map = vec![None; mapping.target_size()]; + for (src, dst) in mapping.mapping_pairs() { + inverse_map[dst] = Some(src); + } + ColIndexMapping::with_target_size(inverse_map, mapping.source_size()) + }; + + predicate = predicate.rewrite_expr(&mut inverse_mapping); let scan_without_predicate = generic::Scan::new( self.table_name().to_string(), @@ -266,7 +273,7 @@ impl LogicalScan { self.output_col_idx().to_vec(), self.core.table_desc.clone(), self.indexes().to_vec(), - self.base.ctx.clone(), + self.base.ctx().clone(), predicate, self.for_system_time_as_of_proctime(), self.table_cardinality(), @@ -281,7 +288,7 @@ impl LogicalScan { output_col_idx, self.core.table_desc.clone(), self.indexes().to_vec(), - self.base.ctx.clone(), + self.base.ctx().clone(), self.predicate().clone(), self.for_system_time_as_of_proctime(), self.table_cardinality(), @@ -302,7 +309,7 @@ impl_plan_tree_node_for_leaf! {LogicalScan} impl Distill for LogicalScan { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(5); vec.push(("table", Pretty::from(self.table_name().to_owned()))); let key_is_columns = @@ -390,7 +397,9 @@ impl PredicatePushdown for LogicalScan { // If the predicate contains `CorrelatedInputRef` or `now()`. We don't push down. // This case could come from the predicate push down before the subquery unnesting. 
struct HasCorrelated {} - impl ExprVisitor for HasCorrelated { + impl ExprVisitor for HasCorrelated { + type Result = bool; + fn merge(a: bool, b: bool) -> bool { a | b } @@ -431,7 +440,7 @@ impl LogicalScan { let (scan_ranges, predicate) = self.predicate().clone().split_to_scan_ranges( self.core.table_desc.clone(), self.base - .ctx + .ctx() .session_ctx() .config() .get_max_split_range_gap(), @@ -542,7 +551,7 @@ impl ToStream for LogicalScan { None.into(), ))); } - match self.base.logical_pk.is_empty() { + match self.base.stream_key().is_none() { true => { let mut col_ids = HashSet::new(); diff --git a/src/frontend/src/optimizer/plan_node/logical_share.rs b/src/frontend/src/optimizer/plan_node/logical_share.rs index d924ee7180168..d6b5711740a98 100644 --- a/src/frontend/src/optimizer/plan_node/logical_share.rs +++ b/src/frontend/src/optimizer/plan_node/logical_share.rs @@ -69,7 +69,7 @@ impl LogicalShare { } pub(super) fn pretty_fields<'a>(base: &PlanBase, name: &'a str) -> XmlNode<'a> { - childless_record(name, vec![("id", Pretty::debug(&base.id.0))]) + childless_record(name, vec![("id", Pretty::debug(&base.id().0))]) } } diff --git a/src/frontend/src/optimizer/plan_node/logical_source.rs b/src/frontend/src/optimizer/plan_node/logical_source.rs index 4ac006887ef55..45a5fbcb2240f 100644 --- a/src/frontend/src/optimizer/plan_node/logical_source.rs +++ b/src/frontend/src/optimizer/plan_node/logical_source.rs @@ -17,14 +17,18 @@ use std::ops::Bound; use std::ops::Bound::{Excluded, Included, Unbounded}; use std::rc::Rc; +use fixedbitset::FixedBitSet; use itertools::Itertools; use pretty_xmlish::{Pretty, XmlNode}; -use risingwave_common::catalog::{ColumnCatalog, Schema, KAFKA_TIMESTAMP_COLUMN_NAME}; -use risingwave_common::error::Result; -use risingwave_connector::source::DataType; +use risingwave_common::catalog::{ + ColumnCatalog, ColumnDesc, Field, Schema, KAFKA_TIMESTAMP_COLUMN_NAME, +}; +use risingwave_common::error::{ErrorCode, Result, RwError, TrackingIssue}; 
+use risingwave_connector::source::{ConnectorProperties, DataType}; use risingwave_pb::plan_common::column_desc::GeneratedOrDefaultColumn; use risingwave_pb::plan_common::GeneratedColumnDesc; +use super::generic::GenericPlanRef; use super::stream_watermark_filter::StreamWatermarkFilter; use super::utils::{childless_record, Distill}; use super::{ @@ -35,10 +39,13 @@ use super::{ use crate::catalog::source_catalog::SourceCatalog; use crate::expr::{Expr, ExprImpl, ExprRewriter, ExprType, InputRef}; use crate::optimizer::optimizer_context::OptimizerContextRef; +use crate::optimizer::plan_node::stream_fs_fetch::StreamFsFetch; use crate::optimizer::plan_node::utils::column_names_pretty; use crate::optimizer::plan_node::{ - ColumnPruningContext, PredicatePushdownContext, RewriteStreamContext, ToStreamContext, + ColumnPruningContext, PredicatePushdownContext, RewriteStreamContext, StreamDedup, + ToStreamContext, }; +use crate::optimizer::property::{Distribution, Order, RequiredDist}; use crate::utils::{ColIndexMapping, Condition, IndexRewriter}; /// `LogicalSource` returns contents of a table or other equivalent object @@ -152,6 +159,73 @@ impl LogicalSource { Ok(Some(exprs)) } + fn rewrite_new_s3_plan(&self) -> Result { + let logical_source = generic::Source { + catalog: self.core.catalog.clone(), + column_catalog: vec![ + ColumnCatalog { + column_desc: ColumnDesc::from_field_with_column_id( + &Field { + name: "filename".to_string(), + data_type: DataType::Varchar, + sub_fields: vec![], + type_name: "".to_string(), + }, + 0, + ), + is_hidden: false, + }, + ColumnCatalog { + column_desc: ColumnDesc::from_field_with_column_id( + &Field { + name: "last_edit_time".to_string(), + data_type: DataType::Timestamp, + sub_fields: vec![], + type_name: "".to_string(), + }, + 1, + ), + is_hidden: false, + }, + ColumnCatalog { + column_desc: ColumnDesc::from_field_with_column_id( + &Field { + name: "file_size".to_string(), + data_type: DataType::Int64, + sub_fields: vec![], + 
type_name: "".to_string(), + }, + 0, + ), + is_hidden: false, + }, + ], + row_id_index: None, + gen_row_id: false, + ..self.core.clone() + }; + let mut new_s3_plan: PlanRef = StreamSource { + base: PlanBase::new_stream_with_core( + &logical_source, + Distribution::Single, + true, // `list` will keep listing all objects, it must be append-only + false, + FixedBitSet::with_capacity(logical_source.column_catalog.len()), + ), + core: logical_source, + } + .into(); + new_s3_plan = RequiredDist::shard_by_key(3, &[0]) + .enforce_if_not_satisfies(new_s3_plan, &Order::any())?; + new_s3_plan = StreamDedup::new(generic::Dedup { + input: new_s3_plan, + dedup_cols: vec![0], + }) + .into(); + + Ok(new_s3_plan) + } + /// `row_id_index` in source node should rule out generated column #[must_use] fn rewrite_row_id_idx(columns: &[ColumnCatalog], row_id_index: Option) -> Option { @@ -200,14 +274,18 @@ impl LogicalSource { } } - fn wrap_with_optional_generated_columns_stream_proj(&self) -> Result { + fn wrap_with_optional_generated_columns_stream_proj( + &self, + input: Option, + ) -> Result { if let Some(exprs) = &self.output_exprs { - let source = StreamSource::new(self.rewrite_to_stream_batch_source()); - let logical_project = generic::Project::new(exprs.to_vec(), source.into()); + let source: PlanRef = + dispatch_new_s3_plan(self.rewrite_to_stream_batch_source(), input); + let logical_project = generic::Project::new(exprs.to_vec(), source); Ok(StreamProject::new(logical_project).into()) } else { - let source = StreamSource::new(self.core.clone()); - Ok(source.into()) + let source = dispatch_new_s3_plan(self.core.clone(), input); + Ok(source) } } @@ -369,8 +447,7 @@ fn expr_to_kafka_timestamp_range( match &expr { ExprImpl::FunctionCall(function_call) => { - if let Some((timestampz_literal, reverse)) = extract_timestampz_literal(&expr).unwrap() - { + if let Ok(Some((timestampz_literal, reverse))) = extract_timestampz_literal(&expr) { match function_call.func_type() { 
ExprType::GreaterThan => { if reverse { @@ -430,7 +507,7 @@ impl PredicatePushdown for LogicalSource { let mut new_conjunctions = Vec::with_capacity(predicate.conjunctions.len()); for expr in predicate.conjunctions { - if let Some(e) = expr_to_kafka_timestamp_range(expr, &mut range, &self.base.schema) { + if let Some(e) = expr_to_kafka_timestamp_range(expr, &mut range, self.base.schema()) { // Not recognized, so push back new_conjunctions.push(e); } @@ -453,6 +530,16 @@ impl PredicatePushdown for LogicalSource { impl ToBatch for LogicalSource { fn to_batch(&self) -> Result { + if self.core.catalog.is_some() + && ConnectorProperties::is_new_fs_connector_b_tree_map( + &self.core.catalog.as_ref().unwrap().properties, + ) + { + return Err(RwError::from(ErrorCode::NotImplemented( + "New S3 connector for batch".to_string(), + TrackingIssue::from(None), + ))); + } let source = self.wrap_with_optional_generated_columns_batch_proj()?; Ok(source) } @@ -460,11 +547,20 @@ impl ToBatch for LogicalSource { impl ToStream for LogicalSource { fn to_stream(&self, _ctx: &mut ToStreamContext) -> Result { - let mut plan = if self.core.for_table { - StreamSource::new(self.rewrite_to_stream_batch_source()).into() + let mut plan_prefix: Option = None; + let mut plan: PlanRef; + if self.core.catalog.is_some() + && ConnectorProperties::is_new_fs_connector_b_tree_map( + &self.core.catalog.as_ref().unwrap().properties, + ) + { + plan_prefix = Some(self.rewrite_new_s3_plan()?); + } + plan = if self.core.for_table { + dispatch_new_s3_plan(self.rewrite_to_stream_batch_source(), plan_prefix) } else { // Create MV on source. - self.wrap_with_optional_generated_columns_stream_proj()? + self.wrap_with_optional_generated_columns_stream_proj(plan_prefix)? 
}; if let Some(catalog) = self.source_catalog() @@ -491,3 +587,12 @@ impl ToStream for LogicalSource { )) } } + +#[inline] +fn dispatch_new_s3_plan(source: generic::Source, input: Option) -> PlanRef { + if let Some(input) = input { + StreamFsFetch::new(input, source).into() + } else { + StreamSource::new(source).into() + } +} diff --git a/src/frontend/src/optimizer/plan_node/logical_table_function.rs b/src/frontend/src/optimizer/plan_node/logical_table_function.rs index ee60a624be3ba..15d510cc1c6fd 100644 --- a/src/frontend/src/optimizer/plan_node/logical_table_function.rs +++ b/src/frontend/src/optimizer/plan_node/logical_table_function.rs @@ -14,7 +14,7 @@ use pretty_xmlish::{Pretty, XmlNode}; use risingwave_common::catalog::{Field, Schema}; -use risingwave_common::error::{ErrorCode, Result}; +use risingwave_common::error::Result; use risingwave_common::types::DataType; use super::utils::{childless_record, Distill}; @@ -25,23 +25,30 @@ use super::{ use crate::expr::{Expr, ExprRewriter, TableFunction}; use crate::optimizer::optimizer_context::OptimizerContextRef; use crate::optimizer::plan_node::{ - BatchTableFunction, ColumnPruningContext, PredicatePushdownContext, RewriteStreamContext, - ToStreamContext, + ColumnPruningContext, PredicatePushdownContext, RewriteStreamContext, ToStreamContext, }; use crate::optimizer::property::FunctionalDependencySet; use crate::utils::{ColIndexMapping, Condition}; -/// `LogicalGenerateSeries` implements Hop Table Function. +/// `LogicalTableFunction` is a scalar/table function used as a relation (in the `FROM` clause). +/// +/// If the function returns a struct, it will be flattened into multiple columns. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct LogicalTableFunction { pub base: PlanBase, pub table_function: TableFunction, + pub with_ordinality: bool, } impl LogicalTableFunction { /// Create a [`LogicalTableFunction`] node. Used internally by optimizer. 
- pub fn new(table_function: TableFunction, ctx: OptimizerContextRef) -> Self { - let schema = if let DataType::Struct(s) = table_function.return_type() { + pub fn new( + table_function: TableFunction, + with_ordinality: bool, + ctx: OptimizerContextRef, + ) -> Self { + let mut schema = if let DataType::Struct(s) = table_function.return_type() { + // If the function returns a struct, it will be flattened into multiple columns. Schema::from(&s) } else { Schema { @@ -51,11 +58,17 @@ impl LogicalTableFunction { )], } }; + if with_ordinality { + schema + .fields + .push(Field::with_name(DataType::Int64, "ordinality")); + } let functional_dependency = FunctionalDependencySet::new(schema.len()); - let base = PlanBase::new_logical(ctx, schema, vec![], functional_dependency); + let base = PlanBase::new_logical(ctx, schema, None, functional_dependency); Self { base, table_function, + with_ordinality, } } @@ -110,26 +123,19 @@ impl PredicatePushdown for LogicalTableFunction { impl ToBatch for LogicalTableFunction { fn to_batch(&self) -> Result { - Ok(BatchTableFunction::new(self.clone()).into()) + unreachable!("TableFunction should be converted to ProjectSet") } } impl ToStream for LogicalTableFunction { fn to_stream(&self, _ctx: &mut ToStreamContext) -> Result { - Err( - ErrorCode::NotImplemented("LogicalTableFunction::to_stream".to_string(), None.into()) - .into(), - ) + unreachable!("TableFunction should be converted to ProjectSet") } fn logical_rewrite_for_stream( &self, _ctx: &mut RewriteStreamContext, ) -> Result<(PlanRef, ColIndexMapping)> { - Err(ErrorCode::NotImplemented( - "LogicalTableFunction::logical_rewrite_for_stream".to_string(), - None.into(), - ) - .into()) + unreachable!("TableFunction should be converted to ProjectSet") } } diff --git a/src/frontend/src/optimizer/plan_node/logical_topn.rs b/src/frontend/src/optimizer/plan_node/logical_topn.rs index 8d9d446c0900f..39d97a56fe3a6 100644 --- a/src/frontend/src/optimizer/plan_node/logical_topn.rs +++ 
b/src/frontend/src/optimizer/plan_node/logical_topn.rs @@ -109,32 +109,33 @@ impl LogicalTopN { fn gen_dist_stream_top_n_plan(&self, stream_input: PlanRef) -> Result { let input_dist = stream_input.distribution().clone(); - let gen_single_plan = |stream_input: PlanRef| -> Result { - let input = - RequiredDist::single().enforce_if_not_satisfies(stream_input, &Order::any())?; - let mut logical = self.core.clone(); - logical.input = input; - Ok(StreamTopN::new(logical).into()) - }; - // if it is append only, for now we don't generate 2-phase rules if stream_input.append_only() { - return gen_single_plan(stream_input); + return self.gen_single_stream_top_n_plan(stream_input); } match input_dist { - Distribution::Single | Distribution::SomeShard => gen_single_plan(stream_input), + Distribution::Single | Distribution::SomeShard => { + self.gen_single_stream_top_n_plan(stream_input) + } Distribution::Broadcast => Err(RwError::from(ErrorCode::NotImplemented( "topN does not support Broadcast".to_string(), None.into(), ))), Distribution::HashShard(dists) | Distribution::UpstreamHashShard(dists, _) => { - self.gen_vnode_two_phase_streaming_top_n_plan(stream_input, &dists) + self.gen_vnode_two_phase_stream_top_n_plan(stream_input, &dists) } } } - fn gen_vnode_two_phase_streaming_top_n_plan( + fn gen_single_stream_top_n_plan(&self, stream_input: PlanRef) -> Result { + let input = RequiredDist::single().enforce_if_not_satisfies(stream_input, &Order::any())?; + let mut core = self.core.clone(); + core.input = input; + Ok(StreamTopN::new(core).into()) + } + + fn gen_vnode_two_phase_stream_top_n_plan( &self, stream_input: PlanRef, dist_key: &[usize], @@ -147,6 +148,7 @@ impl LogicalTopN { .enumerate() .map(|(idx, field)| InputRef::new(idx, field.data_type.clone()).into()) .collect(); + exprs.push( FunctionCall::new( ExprType::Vnode, @@ -159,25 +161,30 @@ impl LogicalTopN { ); let vnode_col_idx = exprs.len() - 1; let project = StreamProject::new(generic::Project::new(exprs.clone(), 
stream_input)); + let limit_attr = TopNLimit::new( self.limit_attr().limit() + self.offset(), self.limit_attr().with_ties(), ); - let mut logical_top_n = - generic::TopN::without_group(project.into(), limit_attr, 0, self.topn_order().clone()); - logical_top_n.group_key = vec![vnode_col_idx]; - let local_top_n = StreamGroupTopN::new(logical_top_n, Some(vnode_col_idx)); + let local_top_n = generic::TopN::with_group( + project.into(), + limit_attr, + 0, + self.topn_order().clone(), + vec![vnode_col_idx], + ); + let local_top_n = StreamGroupTopN::new(local_top_n, Some(vnode_col_idx)); + let exchange = RequiredDist::single().enforce_if_not_satisfies(local_top_n.into(), &Order::any())?; + let global_top_n = generic::TopN::without_group( exchange, self.limit_attr(), self.offset(), self.topn_order().clone(), ); - - // TODO(st1page): solve it - let global_top_n = StreamTopN::with_stream_key(global_top_n, self.logical_pk().to_vec()); + let global_top_n = StreamTopN::new(global_top_n); // use another projection to remove the column we added before. exprs.pop(); @@ -329,9 +336,9 @@ impl ToStream for LogicalTopN { let input = self.input().to_stream(ctx)?; let input = RequiredDist::hash_shard(self.group_key()) .enforce_if_not_satisfies(input, &Order::any())?; - let mut logical = self.core.clone(); - logical.input = input; - StreamGroupTopN::new(logical, None).into() + let mut core = self.core.clone(); + core.input = input; + StreamGroupTopN::new(core, None).into() } else { self.gen_dist_stream_top_n_plan(self.input().to_stream(ctx)?)? }) diff --git a/src/frontend/src/optimizer/plan_node/logical_union.rs b/src/frontend/src/optimizer/plan_node/logical_union.rs index e21b39088315e..1f02b026c0020 100644 --- a/src/frontend/src/optimizer/plan_node/logical_union.rs +++ b/src/frontend/src/optimizer/plan_node/logical_union.rs @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::cmp::max; +use std::collections::BTreeMap; + use itertools::Itertools; use risingwave_common::catalog::Schema; use risingwave_common::error::Result; @@ -28,6 +31,7 @@ use crate::optimizer::plan_node::{ }; use crate::optimizer::property::RequiredDist; use crate::utils::{ColIndexMapping, Condition}; +use crate::Explain; /// `LogicalUnion` returns the union of the rows of its inputs. /// If `all` is false, it needs to eliminate duplicates. @@ -125,11 +129,10 @@ impl ToBatch for LogicalUnion { // Convert union to union all + agg if !self.all() { let batch_union = BatchUnion::new(new_logical).into(); - Ok(BatchHashAgg::new(generic::Agg::new( - vec![], - (0..self.base.schema.len()).collect(), - batch_union, - )) + Ok(BatchHashAgg::new( + generic::Agg::new(vec![], (0..self.base.schema().len()).collect(), batch_union) + .with_enable_two_phase(false), + ) .into()) } else { Ok(BatchUnion::new(new_logical).into()) @@ -140,7 +143,12 @@ impl ToBatch for LogicalUnion { impl ToStream for LogicalUnion { fn to_stream(&self, ctx: &mut ToStreamContext) -> Result { // TODO: use round robin distribution instead of using hash distribution of all inputs. 
- let dist = RequiredDist::hash_shard(self.base.logical_pk()); + let dist = RequiredDist::hash_shard(self.base.stream_key().unwrap_or_else(|| { + panic!( + "should always have a stream key in the stream plan but not, sub plan: {}", + PlanRef::from(self.clone()).explain_to_string() + ) + })); let new_inputs: Result> = self .inputs() .iter() @@ -162,25 +170,25 @@ impl ToStream for LogicalUnion { &self, ctx: &mut RewriteStreamContext, ) -> Result<(PlanRef, ColIndexMapping)> { - let original_schema = self.base.schema.clone(); + let original_schema = self.base.schema().clone(); let original_schema_len = original_schema.len(); let mut rewrites = vec![]; for input in &self.core.inputs { rewrites.push(input.logical_rewrite_for_stream(ctx)?); } - let original_schema_contain_all_input_pks = + let original_schema_contain_all_input_stream_keys = rewrites.iter().all(|(new_input, col_index_mapping)| { let original_schema_new_pos = (0..original_schema_len) .map(|x| col_index_mapping.map(x)) .collect_vec(); new_input - .logical_pk() + .expect_stream_key() .iter() .all(|x| original_schema_new_pos.contains(x)) }); - if original_schema_contain_all_input_pks { + if original_schema_contain_all_input_stream_keys { // Add one more column at the end of the original schema to identify the record came // from which input. [original_schema + source_col] let new_inputs = rewrites @@ -218,29 +226,45 @@ impl ToStream for LogicalUnion { Ok((new_union.into(), out_col_change)) } else { // In order to ensure all inputs have the same schema for new union, we construct new - // schema like that: [original_schema + input1_pk + input2_pk + ... 
+ - // source_col] - let input_pk_types = rewrites - .iter() - .flat_map(|(new_input, _)| { - new_input - .logical_pk() - .iter() - .map(|x| new_input.schema().fields[*x].data_type()) - }) - .collect_vec(); - let input_pk_nulls = input_pk_types + // schema like that: [original_schema + merged_stream_key + source_col] + // where merged_stream_key is merged by the types of each input stream key. + // If all inputs have the same stream key column types, we have a small merged_stream_key. Otherwise, we will have a large merged_stream_key. + + let (merged_stream_key_types, types_offset) = { + let mut max_types_counter = BTreeMap::default(); + for (new_input, _) in &rewrites { + let mut types_counter = BTreeMap::default(); + for x in new_input.expect_stream_key() { + types_counter + .entry(new_input.schema().fields[*x].data_type()) + .and_modify(|x| *x += 1) + .or_insert(1); + } + for (key, val) in types_counter { + max_types_counter + .entry(key) + .and_modify(|x| *x = max(*x, val)) + .or_insert(val); + } + } + + let mut merged_stream_key_types = vec![]; + let mut types_offset = BTreeMap::default(); + let mut offset = 0; + for (key, val) in max_types_counter { + let _ = types_offset.insert(key.clone(), offset); + offset += val; + merged_stream_key_types.extend(std::iter::repeat(key.clone()).take(val)); + } + + (merged_stream_key_types, types_offset) + }; + + let input_stream_key_nulls = merged_stream_key_types .iter() .map(|t| ExprImpl::Literal(Literal::new(None, t.clone()).into())) .collect_vec(); - let input_pk_lens = rewrites - .iter() - .map(|(new_input, _)| new_input.logical_pk().len()) - .collect_vec(); - let mut input_pk_offsets = vec![0]; - for (i, len) in input_pk_lens.into_iter().enumerate() { - input_pk_offsets.push(input_pk_offsets[i] + len) - } + let new_inputs = rewrites .into_iter() .enumerate() @@ -257,18 +281,22 @@ impl ToStream for LogicalUnion { ) }) .collect_vec(); - // input1_pk + input2_pk + ... 
- let mut input_pks = input_pk_nulls.clone(); - for (j, pk_idx) in new_input.logical_pk().iter().enumerate() { - input_pks[input_pk_offsets[i] + j] = ExprImpl::InputRef( - InputRef::new( - *pk_idx, - new_input.schema().fields[*pk_idx].data_type.clone(), - ) - .into(), - ); + // merged_stream_key + let mut input_stream_keys = input_stream_key_nulls.clone(); + let mut types_counter = BTreeMap::default(); + for stream_key_idx in new_input.expect_stream_key() { + let data_type = + new_input.schema().fields[*stream_key_idx].data_type.clone(); + let count = *types_counter + .entry(data_type.clone()) + .and_modify(|x| *x += 1) + .or_insert(1); + let type_start_offset = *types_offset.get(&data_type).unwrap(); + + input_stream_keys[type_start_offset + count - 1] = + ExprImpl::InputRef(InputRef::new(*stream_key_idx, data_type).into()); } - exprs.extend(input_pks); + exprs.extend(input_stream_keys); // source_col exprs.push(ExprImpl::Literal( Literal::new(Some((i as i32).to_scalar_value()), DataType::Int32).into(), @@ -280,7 +308,7 @@ impl ToStream for LogicalUnion { let new_union = LogicalUnion::new_with_source_col( self.all(), new_inputs, - Some(original_schema_len + input_pk_types.len()), + Some(original_schema_len + merged_stream_key_types.len()), ); // We have already used project to map rewrite input to the origin schema, so we can use // identity with the new schema len. 
@@ -325,7 +353,7 @@ mod tests { // Check the result let union = plan.as_logical_union().unwrap(); - assert_eq!(union.base.schema.len(), 2); + assert_eq!(union.base.schema().len(), 2); } #[tokio::test] diff --git a/src/frontend/src/optimizer/plan_node/logical_update.rs b/src/frontend/src/optimizer/plan_node/logical_update.rs index 0c903559b4e2b..1dbe1d3d3c5c9 100644 --- a/src/frontend/src/optimizer/plan_node/logical_update.rs +++ b/src/frontend/src/optimizer/plan_node/logical_update.rs @@ -12,12 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::vec; - -use risingwave_common::catalog::{Field, Schema, TableVersionId}; +use risingwave_common::catalog::TableVersionId; use risingwave_common::error::Result; -use risingwave_common::types::DataType; +use super::generic::GenericPlanRef; use super::utils::impl_distill_by_unit; use super::{ gen_filter_and_pushdown, generic, BatchUpdate, ColPrunable, ExprRewritable, LogicalProject, @@ -28,7 +26,6 @@ use crate::expr::{ExprImpl, ExprRewriter}; use crate::optimizer::plan_node::{ ColumnPruningContext, PredicatePushdownContext, RewriteStreamContext, ToStreamContext, }; -use crate::optimizer::property::FunctionalDependencySet; use crate::utils::{ColIndexMapping, Condition}; /// [`LogicalUpdate`] iterates on input relation, set some columns, and inject update records into @@ -43,14 +40,7 @@ pub struct LogicalUpdate { impl From> for LogicalUpdate { fn from(core: generic::Update) -> Self { - let ctx = core.input.ctx(); - let schema = if core.returning { - core.input.schema().clone() - } else { - Schema::new(vec![Field::unnamed(DataType::Int64)]) - }; - let fd_set = FunctionalDependencySet::new(schema.len()); - let base = PlanBase::new_logical(ctx, schema, vec![], fd_set); + let base = PlanBase::new_logical_with_core(&core); Self { base, core } } } diff --git a/src/frontend/src/optimizer/plan_node/logical_values.rs 
b/src/frontend/src/optimizer/plan_node/logical_values.rs index e6f8dc59c63ff..e62c6400f2015 100644 --- a/src/frontend/src/optimizer/plan_node/logical_values.rs +++ b/src/frontend/src/optimizer/plan_node/logical_values.rs @@ -21,6 +21,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::error::Result; use risingwave_common::types::{DataType, ScalarImpl}; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ BatchValues, ColPrunable, ExprRewritable, LogicalFilter, PlanBase, PlanRef, PredicatePushdown, @@ -50,7 +51,7 @@ impl LogicalValues { } } let functional_dependency = FunctionalDependencySet::new(schema.len()); - let base = PlanBase::new_logical(ctx, schema, vec![], functional_dependency); + let base = PlanBase::new_logical(ctx, schema, None, functional_dependency); Self { rows: rows.into(), base, @@ -70,7 +71,7 @@ impl LogicalValues { } } let functional_dependency = FunctionalDependencySet::new(schema.len()); - let base = PlanBase::new_logical(ctx, schema, vec![pk_index], functional_dependency); + let base = PlanBase::new_logical(ctx, schema, Some(vec![pk_index]), functional_dependency); Self { rows: rows.into(), base, @@ -144,7 +145,7 @@ impl ColPrunable for LogicalValues { .iter() .map(|i| self.schema().fields[*i].clone()) .collect(); - Self::new(rows, Schema { fields }, self.base.ctx.clone()).into() + Self::new(rows, Schema { fields }, self.base.ctx().clone()).into() } } diff --git a/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs b/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs index 73f82e86aa260..9f2e8d94634be 100644 --- a/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs +++ b/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::hash::Hash; +use super::generic::GenericPlanRef; use super::{EndoPlan, LogicalShare, PlanNodeId, PlanRef, PlanTreeNodeUnary, VisitPlan}; use crate::optimizer::plan_visitor; use 
crate::utils::{Endo, Visit}; diff --git a/src/frontend/src/optimizer/plan_node/mod.rs b/src/frontend/src/optimizer/plan_node/mod.rs index 926cf85048f3e..f16ebfb0c792c 100644 --- a/src/frontend/src/optimizer/plan_node/mod.rs +++ b/src/frontend/src/optimizer/plan_node/mod.rs @@ -46,7 +46,7 @@ use serde::Serialize; use smallvec::SmallVec; use self::batch::BatchPlanRef; -use self::generic::GenericPlanRef; +use self::generic::{GenericPlanRef, PhysicalPlanRef}; use self::stream::StreamPlanRef; use self::utils::Distill; use super::property::{Distribution, FunctionalDependencySet, Order}; @@ -218,6 +218,15 @@ impl RewriteExprsRecursive for PlanRef { } impl PlanRef { + pub fn expect_stream_key(&self) -> &[usize] { + self.stream_key().unwrap_or_else(|| { + panic!( + "a stream key is expected but not exist, plan:\n{}", + self.explain_to_string() + ) + }) + } + fn prune_col_inner(&self, required_cols: &[usize], ctx: &mut ColumnPruningContext) -> PlanRef { if let Some(logical_share) = self.as_logical_share() { // Check the share cache first. If cache exists, it means this is the second round of @@ -410,33 +419,35 @@ impl PlanTreeNode for PlanRef { } } -impl StreamPlanRef for PlanRef { - fn distribution(&self) -> &Distribution { - &self.plan_base().dist +impl PlanNodeMeta for PlanRef { + fn node_type(&self) -> PlanNodeType { + self.0.node_type() } - fn append_only(&self) -> bool { - self.plan_base().append_only + fn plan_base(&self) -> &PlanBase { + self.0.plan_base() } - fn emit_on_window_close(&self) -> bool { - self.plan_base().emit_on_window_close + fn convention(&self) -> Convention { + self.0.convention() } } -impl BatchPlanRef for PlanRef { - fn order(&self) -> &Order { - &self.plan_base().order +/// Implement for every type that provides [`PlanBase`] through [`PlanNodeMeta`]. +impl

GenericPlanRef for P +where + P: PlanNodeMeta + Eq + Hash, +{ + fn id(&self) -> PlanNodeId { + self.plan_base().id() } -} -impl GenericPlanRef for PlanRef { fn schema(&self) -> &Schema { - &self.plan_base().schema + self.plan_base().schema() } - fn logical_pk(&self) -> &[usize] { - &self.plan_base().logical_pk + fn stream_key(&self) -> Option<&[usize]> { + self.plan_base().stream_key() } fn ctx(&self) -> OptimizerContextRef { @@ -448,6 +459,47 @@ impl GenericPlanRef for PlanRef { } } +/// Implement for every type that provides [`PlanBase`] through [`PlanNodeMeta`]. +// TODO: further constrain the convention to be `Stream` or `Batch`. +impl

PhysicalPlanRef for P +where + P: PlanNodeMeta + Eq + Hash, +{ + fn distribution(&self) -> &Distribution { + self.plan_base().distribution() + } +} + +/// Implement for every type that provides [`PlanBase`] through [`PlanNodeMeta`]. +// TODO: further constrain the convention to be `Stream`. +impl

StreamPlanRef for P +where + P: PlanNodeMeta + Eq + Hash, +{ + fn append_only(&self) -> bool { + self.plan_base().append_only() + } + + fn emit_on_window_close(&self) -> bool { + self.plan_base().emit_on_window_close() + } + + fn watermark_columns(&self) -> &FixedBitSet { + self.plan_base().watermark_columns() + } +} + +/// Implement for every type that provides [`PlanBase`] through [`PlanNodeMeta`]. +// TODO: further constrain the convention to be `Batch`. +impl

BatchPlanRef for P +where + P: PlanNodeMeta + Eq + Hash, +{ + fn order(&self) -> &Order { + self.plan_base().order() + } +} + /// In order to let expression display id started from 1 for explaining, hidden column names and /// other places. We will reset expression display id to 0 and clone the whole plan to reset the /// schema. @@ -503,43 +555,44 @@ pub(crate) fn pretty_config() -> PrettyConfig { impl dyn PlanNode { pub fn id(&self) -> PlanNodeId { - self.plan_base().id + self.plan_base().id() } pub fn ctx(&self) -> OptimizerContextRef { - self.plan_base().ctx.clone() + self.plan_base().ctx().clone() } pub fn schema(&self) -> &Schema { - &self.plan_base().schema + self.plan_base().schema() } - pub fn logical_pk(&self) -> &[usize] { - &self.plan_base().logical_pk + pub fn stream_key(&self) -> Option<&[usize]> { + self.plan_base().stream_key() } pub fn order(&self) -> &Order { - &self.plan_base().order + self.plan_base().order() } + // TODO: avoid no manual delegation pub fn distribution(&self) -> &Distribution { - &self.plan_base().dist + self.plan_base().distribution() } pub fn append_only(&self) -> bool { - self.plan_base().append_only + self.plan_base().append_only() } pub fn emit_on_window_close(&self) -> bool { - self.plan_base().emit_on_window_close + self.plan_base().emit_on_window_close() } pub fn functional_dependency(&self) -> &FunctionalDependencySet { - &self.plan_base().functional_dependency + self.plan_base().functional_dependency() } pub fn watermark_columns(&self) -> &FixedBitSet { - &self.plan_base().watermark_columns + self.plan_base().watermark_columns() } /// Serialize the plan node and its children to a stream plan proto. 
@@ -566,7 +619,12 @@ impl dyn PlanNode { identity: self.explain_myself_to_string(), node_body: node, operator_id: self.id().0 as _, - stream_key: self.logical_pk().iter().map(|x| *x as u32).collect(), + stream_key: self + .stream_key() + .unwrap_or_default() + .iter() + .map(|x| *x as u32) + .collect(), fields: self.schema().to_prost(), append_only: self.append_only(), } @@ -603,8 +661,6 @@ impl dyn PlanNode { } mod plan_base; -#[macro_use] -mod plan_tree_node_v2; pub use plan_base::*; #[macro_use] mod plan_tree_node; @@ -627,7 +683,6 @@ pub use merge_eq_nodes::*; pub mod batch; pub mod generic; pub mod stream; -pub mod stream_derive; pub use generic::{PlanAggCall, PlanAggCallDisplay}; @@ -689,6 +744,7 @@ mod stream_eowc_over_window; mod stream_exchange; mod stream_expand; mod stream_filter; +mod stream_fs_fetch; mod stream_group_topn; mod stream_hash_agg; mod stream_hash_join; @@ -773,6 +829,7 @@ pub use stream_eowc_over_window::StreamEowcOverWindow; pub use stream_exchange::StreamExchange; pub use stream_expand::StreamExpand; pub use stream_filter::StreamFilter; +pub use stream_fs_fetch::StreamFsFetch; pub use stream_group_topn::StreamGroupTopN; pub use stream_hash_agg::StreamHashAgg; pub use stream_hash_join::StreamHashJoin; @@ -898,6 +955,7 @@ macro_rules! for_all_plan_nodes { , { Stream, EowcOverWindow } , { Stream, EowcSort } , { Stream, OverWindow } + , { Stream, FsFetch } } }; } @@ -1005,6 +1063,7 @@ macro_rules! 
for_stream_plan_nodes { , { Stream, EowcOverWindow } , { Stream, EowcSort } , { Stream, OverWindow } + , { Stream, FsFetch } } }; } diff --git a/src/frontend/src/optimizer/plan_node/plan_base.rs b/src/frontend/src/optimizer/plan_node/plan_base.rs index 41dd857282fbe..51b1aa5f41141 100644 --- a/src/frontend/src/optimizer/plan_node/plan_base.rs +++ b/src/frontend/src/optimizer/plan_node/plan_base.rs @@ -14,53 +14,138 @@ use educe::Educe; use fixedbitset::FixedBitSet; -use paste::paste; use risingwave_common::catalog::Schema; use super::generic::GenericPlanNode; use super::*; -use crate::for_all_plan_nodes; use crate::optimizer::optimizer_context::OptimizerContextRef; use crate::optimizer::property::{Distribution, FunctionalDependencySet, Order}; -/// the common fields of all nodes, please make a field named `base` in -/// every planNode and correctly value it when construct the planNode. -#[derive(Clone, Debug, Educe)] -#[educe(PartialEq, Eq, Hash)] -pub struct PlanBase { - #[educe(PartialEq(ignore))] - #[educe(Hash(ignore))] - pub id: PlanNodeId, - #[educe(PartialEq(ignore))] - #[educe(Hash(ignore))] - pub ctx: OptimizerContextRef, - pub schema: Schema, - /// the pk indices of the PlanNode's output, a empty logical_pk vec means there is no pk - pub logical_pk: Vec, - /// The order property of the PlanNode's output, store an `&Order::any()` here will not affect - /// correctness, but insert unnecessary sort in plan - pub order: Order, +/// Common extra fields for physical plan nodes. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct PhysicalCommonExtra { /// The distribution property of the PlanNode's output, store an `Distribution::any()` here /// will not affect correctness, but insert unnecessary exchange in plan - pub dist: Distribution, + dist: Distribution, +} + +/// Extra fields for stream plan nodes. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct StreamExtra { + /// Common fields for physical plan nodes. 
+ physical: PhysicalCommonExtra, + /// The append-only property of the PlanNode's output is a stream-only property. Append-only /// means the stream contains only insert operation. - pub append_only: bool, + append_only: bool, /// Whether the output is emitted on window close. - pub emit_on_window_close: bool, - pub functional_dependency: FunctionalDependencySet, + emit_on_window_close: bool, /// The watermark column indices of the PlanNode's output. There could be watermark output from /// this stream operator. - pub watermark_columns: FixedBitSet, + watermark_columns: FixedBitSet, +} + +/// Extra fields for batch plan nodes. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct BatchExtra { + /// Common fields for physical plan nodes. + physical: PhysicalCommonExtra, + + /// The order property of the PlanNode's output, store an `&Order::any()` here will not affect + /// correctness, but insert unnecessary sort in plan + order: Order, +} + +/// Extra fields for physical plan nodes. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +enum PhysicalExtra { + Stream(StreamExtra), + Batch(BatchExtra), +} + +impl PhysicalExtra { + fn common(&self) -> &PhysicalCommonExtra { + match self { + PhysicalExtra::Stream(stream) => &stream.physical, + PhysicalExtra::Batch(batch) => &batch.physical, + } + } + + fn common_mut(&mut self) -> &mut PhysicalCommonExtra { + match self { + PhysicalExtra::Stream(stream) => &mut stream.physical, + PhysicalExtra::Batch(batch) => &mut batch.physical, + } + } + + fn stream(&self) -> &StreamExtra { + match self { + PhysicalExtra::Stream(extra) => extra, + _ => panic!("access stream properties from batch plan node"), + } + } + + fn batch(&self) -> &BatchExtra { + match self { + PhysicalExtra::Batch(extra) => extra, + _ => panic!("access batch properties from stream plan node"), + } + } +} + +/// the common fields of all nodes, please make a field named `base` in +/// every planNode and correctly value it when construct the planNode. 
+/// +/// All fields are intentionally made private and immutable, as they should +/// normally be the same as the given [`GenericPlanNode`] when constructing. +/// +/// - To access them, use traits including [`GenericPlanRef`], +/// [`PhysicalPlanRef`], [`StreamPlanRef`] and [`BatchPlanRef`]. +/// - To mutate them, use methods like `new_*` or `clone_with_*`. +#[derive(Clone, Debug, Educe)] +#[educe(PartialEq, Eq, Hash)] +pub struct PlanBase { + // -- common fields -- + #[educe(PartialEq(ignore), Hash(ignore))] + id: PlanNodeId, + #[educe(PartialEq(ignore), Hash(ignore))] + ctx: OptimizerContextRef, + + schema: Schema, + /// the pk indices of the PlanNode's output, a empty stream key vec means there is no stream key + // TODO: this is actually a logical and stream only property + stream_key: Option>, + functional_dependency: FunctionalDependencySet, + + /// Extra fields if the plan node is physical. + physical_extra: Option, +} + +impl PlanBase { + fn physical_extra(&self) -> &PhysicalExtra { + self.physical_extra + .as_ref() + .expect("access physical properties from logical plan node") + } + + fn physical_extra_mut(&mut self) -> &mut PhysicalExtra { + self.physical_extra + .as_mut() + .expect("access physical properties from logical plan node") + } } impl generic::GenericPlanRef for PlanBase { + fn id(&self) -> PlanNodeId { + self.id + } + fn schema(&self) -> &Schema { &self.schema } - fn logical_pk(&self) -> &[usize] { - &self.logical_pk + fn stream_key(&self) -> Option<&[usize]> { + self.stream_key.as_deref() } fn ctx(&self) -> OptimizerContextRef { @@ -72,80 +157,63 @@ impl generic::GenericPlanRef for PlanBase { } } -impl stream::StreamPlanRef for PlanBase { +impl generic::PhysicalPlanRef for PlanBase { fn distribution(&self) -> &Distribution { - &self.dist + &self.physical_extra().common().dist } +} +impl stream::StreamPlanRef for PlanBase { fn append_only(&self) -> bool { - self.append_only + self.physical_extra().stream().append_only } fn 
emit_on_window_close(&self) -> bool { - self.emit_on_window_close + self.physical_extra().stream().emit_on_window_close + } + + fn watermark_columns(&self) -> &FixedBitSet { + &self.physical_extra().stream().watermark_columns } } + impl batch::BatchPlanRef for PlanBase { fn order(&self) -> &Order { - &self.order + &self.physical_extra().batch().order } } + impl PlanBase { pub fn new_logical( ctx: OptimizerContextRef, schema: Schema, - logical_pk: Vec, + stream_key: Option>, functional_dependency: FunctionalDependencySet, ) -> Self { let id = ctx.next_plan_node_id(); - let watermark_columns = FixedBitSet::with_capacity(schema.len()); Self { id, ctx, schema, - logical_pk, - dist: Distribution::Single, - order: Order::any(), - // Logical plan node won't touch `append_only` field - append_only: true, - emit_on_window_close: false, + stream_key, functional_dependency, - watermark_columns, + physical_extra: None, } } - pub fn new_logical_with_core(node: &impl GenericPlanNode) -> Self { + pub fn new_logical_with_core(core: &impl GenericPlanNode) -> Self { Self::new_logical( - node.ctx(), - node.schema(), - node.logical_pk().unwrap_or_default(), - node.functional_dependency(), - ) - } - - pub fn new_stream_with_logical( - logical: &impl GenericPlanNode, - dist: Distribution, - append_only: bool, - emit_on_window_close: bool, - watermark_columns: FixedBitSet, - ) -> Self { - Self::new_stream( - logical.ctx(), - logical.schema(), - logical.logical_pk().unwrap_or_default().to_vec(), - logical.functional_dependency(), - dist, - append_only, - emit_on_window_close, - watermark_columns, + core.ctx(), + core.schema(), + core.stream_key(), + core.functional_dependency(), ) } pub fn new_stream( ctx: OptimizerContextRef, schema: Schema, - logical_pk: Vec, + stream_key: Option>, functional_dependency: FunctionalDependencySet, dist: Distribution, append_only: bool, @@ -158,22 +226,36 @@ impl PlanBase { id, ctx, schema, - dist, - order: Order::any(), - logical_pk, - append_only, - 
emit_on_window_close, + stream_key, functional_dependency, - watermark_columns, + physical_extra: Some(PhysicalExtra::Stream({ + StreamExtra { + physical: PhysicalCommonExtra { dist }, + append_only, + emit_on_window_close, + watermark_columns, + } + })), } } - pub fn new_batch_from_logical( - logical: &impl GenericPlanNode, + pub fn new_stream_with_core( + core: &impl GenericPlanNode, dist: Distribution, - order: Order, + append_only: bool, + emit_on_window_close: bool, + watermark_columns: FixedBitSet, ) -> Self { - Self::new_batch(logical.ctx(), logical.schema(), dist, order) + Self::new_stream( + core.ctx(), + core.schema(), + core.stream_key(), + core.functional_dependency(), + dist, + append_only, + emit_on_window_close, + watermark_columns, + ) } pub fn new_batch( @@ -184,75 +266,49 @@ impl PlanBase { ) -> Self { let id = ctx.next_plan_node_id(); let functional_dependency = FunctionalDependencySet::new(schema.len()); - let watermark_columns = FixedBitSet::with_capacity(schema.len()); Self { id, ctx, schema, - dist, - order, - logical_pk: vec![], - // Batch plan node won't touch `append_only` field - append_only: true, - emit_on_window_close: false, // TODO(rc): batch EOWC support? 
+ stream_key: None, functional_dependency, - watermark_columns, + physical_extra: Some(PhysicalExtra::Batch({ + BatchExtra { + physical: PhysicalCommonExtra { dist }, + order, + } + })), } } - pub fn derive_stream_plan_base(plan_node: &PlanRef) -> Self { - PlanBase::new_stream( - plan_node.ctx(), - plan_node.schema().clone(), - plan_node.logical_pk().to_vec(), - plan_node.functional_dependency().clone(), - plan_node.distribution().clone(), - plan_node.append_only(), - plan_node.emit_on_window_close(), - plan_node.watermark_columns().clone(), - ) + pub fn new_batch_with_core( + core: &impl GenericPlanNode, + dist: Distribution, + order: Order, + ) -> Self { + Self::new_batch(core.ctx(), core.schema(), dist, order) } pub fn clone_with_new_plan_id(&self) -> Self { let mut new = self.clone(); - new.id = self.ctx.next_plan_node_id(); + new.id = self.ctx().next_plan_node_id(); + new + } + + /// Clone the plan node with a new distribution. + /// + /// Panics if the plan node is not physical. + pub fn clone_with_new_distribution(&self, dist: Distribution) -> Self { + let mut new = self.clone(); + new.physical_extra_mut().common_mut().dist = dist; new } } -macro_rules! impl_base_delegate { - ($( { $convention:ident, $name:ident }),*) => { - $(paste! 
{ - impl [<$convention $name>] { - pub fn id(&self) -> PlanNodeId { - self.plan_base().id - } - pub fn ctx(&self) -> OptimizerContextRef { - self.plan_base().ctx() - } - pub fn schema(&self) -> &Schema { - &self.plan_base().schema - } - pub fn logical_pk(&self) -> &[usize] { - &self.plan_base().logical_pk - } - pub fn order(&self) -> &Order { - &self.plan_base().order - } - pub fn distribution(&self) -> &Distribution { - &self.plan_base().dist - } - pub fn append_only(&self) -> bool { - self.plan_base().append_only - } - pub fn emit_on_window_close(&self) -> bool { - self.plan_base().emit_on_window_close - } - pub fn functional_dependency(&self) -> &FunctionalDependencySet { - &self.plan_base().functional_dependency - } - } - })* +// Mutators for testing only. +#[cfg(test)] +impl PlanBase { + pub fn functional_dependency_mut(&mut self) -> &mut FunctionalDependencySet { + &mut self.functional_dependency } } -for_all_plan_nodes! { impl_base_delegate } diff --git a/src/frontend/src/optimizer/plan_node/plan_tree_node.rs b/src/frontend/src/optimizer/plan_node/plan_tree_node.rs index e1435a6b7b20d..0c46d91f7a566 100644 --- a/src/frontend/src/optimizer/plan_node/plan_tree_node.rs +++ b/src/frontend/src/optimizer/plan_node/plan_tree_node.rs @@ -109,19 +109,6 @@ macro_rules! impl_plan_tree_node_for_leaf { self.clone().into() } } - - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $leaf_node_type { - type PlanRef = crate::optimizer::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { - smallvec::smallvec![] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - assert!(inputs.next().is_none(), "expect exactly no input"); - self.clone() - } - } }; } @@ -141,20 +128,6 @@ macro_rules! 
impl_plan_tree_node_for_unary { self.clone_with_input(inputs[0].clone()).into() } } - - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $unary_node_type { - type PlanRef = crate::optimizer::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { - smallvec::smallvec![self.input()] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - let input = inputs.next().expect("expect exactly 1 input"); - assert!(inputs.next().is_none(), "expect exactly 1 input"); - self.clone_with_input(input).into() - } - } }; } @@ -174,19 +147,5 @@ macro_rules! impl_plan_tree_node_for_binary { .into() } } - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $binary_node_type { - type PlanRef = crate::optimizer::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { - smallvec::smallvec![self.left(), self.right()] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - let left = inputs.next().expect("expect exactly 2 input"); - let right = inputs.next().expect("expect exactly 2 input"); - assert!(inputs.next().is_none(), "expect exactly 2 input"); - self.clone_with_left_right(left, right).into() - } - } }; } diff --git a/src/frontend/src/optimizer/plan_node/plan_tree_node_v2.rs b/src/frontend/src/optimizer/plan_node/plan_tree_node_v2.rs deleted file mode 100644 index e598c7dd61caa..0000000000000 --- a/src/frontend/src/optimizer/plan_node/plan_tree_node_v2.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use smallvec::SmallVec; - -pub trait PlanTreeNodeV2 { - type PlanRef; - - fn inputs(&self) -> SmallVec<[Self::PlanRef; 2]>; - fn clone_with_inputs(&self, inputs: impl Iterator) -> Self; -} - -macro_rules! impl_plan_tree_node_v2_for_stream_leaf_node { - ($node_type:ident) => { - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $node_type { - type PlanRef = crate::optimizer::plan_node::stream::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[Self::PlanRef; 2]> { - smallvec::smallvec![] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - assert!(inputs.next().is_none(), "expect exactly no input"); - self.clone() - } - } - }; -} - -macro_rules! impl_plan_tree_node_v2_for_stream_unary_node { - ($node_type:ident, $input_field:ident) => { - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $node_type { - type PlanRef = crate::optimizer::plan_node::stream::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[Self::PlanRef; 2]> { - smallvec::smallvec![self.$input_field.clone()] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - let mut new = self.clone(); - new.$input_field = inputs.next().expect("expect exactly 1 input"); - assert!(inputs.next().is_none(), "expect exactly 1 input"); - new.clone() - } - } - }; -} - -// macro_rules! 
impl_plan_tree_node_v2_for_stream_binary_node { -// ($node_type:ident, $first_input_field:ident, $second_input_field:ident) => { -// impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $node_type { -// type PlanRef = crate::optimizer::plan_node::stream::PlanRef; - -// fn inputs(&self) -> smallvec::SmallVec<[Self::PlanRef; 2]> { -// smallvec::smallvec![ -// self.$first_input_field.clone(), -// self.$second_input_field.clone() -// ] -// } - -// fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self -// { let mut new = self.clone(); -// new.$first_input_field = inputs.next().expect("expect exactly 2 input"); -// new.$second_input_field = inputs.next().expect("expect exactly 2 input"); -// assert!(inputs.next().is_none(), "expect exactly 2 input"); -// new.clone() -// } -// } -// }; -// } - -macro_rules! impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating { - ($node_type:ident, $core_field:ident, $input_field:ident) => { - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $node_type { - type PlanRef = crate::optimizer::plan_node::stream::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[Self::PlanRef; 2]> { - smallvec::smallvec![self.$core_field.$input_field.clone()] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - let mut new = self.clone(); - new.$core_field.$input_field = inputs.next().expect("expect exactly 1 input"); - assert!(inputs.next().is_none(), "expect exactly 1 input"); - new.clone() - } - } - }; -} - -macro_rules! 
impl_plan_tree_node_v2_for_stream_binary_node_with_core_delegating { - ($node_type:ident, $core_field:ident, $first_input_field:ident, $second_input_field:ident) => { - impl crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2 for $node_type { - type PlanRef = crate::optimizer::plan_node::stream::PlanRef; - - fn inputs(&self) -> smallvec::SmallVec<[Self::PlanRef; 2]> { - smallvec::smallvec![ - self.$core_field.$first_input_field.clone(), - self.$core_field.$second_input_field.clone() - ] - } - - fn clone_with_inputs(&self, mut inputs: impl Iterator) -> Self { - let mut new = self.clone(); - new.$core_field.$first_input_field = inputs.next().expect("expect exactly 2 input"); - new.$core_field.$second_input_field = - inputs.next().expect("expect exactly 2 input"); - assert!(inputs.next().is_none(), "expect exactly 2 input"); - new.clone() - } - } - }; -} diff --git a/src/frontend/src/optimizer/plan_node/stream.rs b/src/frontend/src/optimizer/plan_node/stream.rs index e9de735718a07..866c62c2413a5 100644 --- a/src/frontend/src/optimizer/plan_node/stream.rs +++ b/src/frontend/src/optimizer/plan_node/stream.rs @@ -12,768 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use educe::Educe; -use generic::PlanAggCall; -use itertools::Itertools; -use pb::stream_node as pb_node; -use risingwave_common::catalog::{ColumnDesc, Field, Schema}; -use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_connector::sink::catalog::desc::SinkDesc; -use risingwave_pb::stream_plan as pb; -use smallvec::SmallVec; +use fixedbitset::FixedBitSet; -use super::generic::{GenericPlanNode, GenericPlanRef}; -use super::utils::TableCatalogBuilder; -use super::{generic, EqJoinPredicate, PlanNodeId}; -use crate::expr::{Expr, ExprImpl}; -use crate::optimizer::optimizer_context::OptimizerContextRef; -use crate::optimizer::plan_node::plan_tree_node_v2::PlanTreeNodeV2; -use crate::optimizer::property::{Distribution, FunctionalDependencySet}; -use crate::stream_fragmenter::BuildFragmentGraphState; -use crate::TableCatalog; +use super::generic::PhysicalPlanRef; -macro_rules! impl_node { -($base:ident, $($t:ident),*) => { - #[derive(Debug, Clone, PartialEq, Eq, Hash)] - pub enum Node { - $($t(Box<$t>),)* - } - pub type PlanOwned = ($base, Node); - pub type PlanRef = std::rc::Rc; - $( - impl From<$t> for PlanRef { - fn from(o: $t) -> PlanRef { - std::rc::Rc::new((o.to_stream_base(), Node::$t(Box::new(o)))) - } - } - )* - impl PlanTreeNodeV2 for PlanRef { - type PlanRef = PlanRef; - - fn inputs(&self) -> SmallVec<[Self::PlanRef; 2]> { - match &self.1 { - $(Node::$t(inner) => inner.inputs(),)* - } - } - fn clone_with_inputs(&self, inputs: impl Iterator) -> Self { - match &self.1 { - $(Node::$t(inner) => inner.clone_with_inputs(inputs).into(),)* - } - } - - } -}; -} - -pub trait StreamPlanNode: GenericPlanNode { - fn distribution(&self) -> Distribution; - fn append_only(&self) -> bool; - fn emit_on_window_close(&self) -> bool; - fn to_stream_base(&self) -> PlanBase { - let ctx = self.ctx(); - PlanBase { - id: ctx.next_plan_node_id(), - ctx, - schema: self.schema(), - logical_pk: 
self.logical_pk().unwrap_or_default(), - dist: self.distribution(), - append_only: self.append_only(), - emit_on_window_close: self.emit_on_window_close(), - } - } -} - -pub trait StreamPlanRef: GenericPlanRef { - fn distribution(&self) -> &Distribution; - fn append_only(&self) -> bool; - fn emit_on_window_close(&self) -> bool; -} - -impl generic::GenericPlanRef for PlanRef { - fn schema(&self) -> &Schema { - &self.0.schema - } - - fn logical_pk(&self) -> &[usize] { - &self.0.logical_pk - } - - fn ctx(&self) -> OptimizerContextRef { - self.0.ctx.clone() - } - - fn functional_dependency(&self) -> &FunctionalDependencySet { - self.0.functional_dependency() - } -} - -impl generic::GenericPlanRef for PlanBase { - fn schema(&self) -> &Schema { - &self.schema - } - - fn logical_pk(&self) -> &[usize] { - &self.logical_pk - } - - fn ctx(&self) -> OptimizerContextRef { - self.ctx.clone() - } - - fn functional_dependency(&self) -> &FunctionalDependencySet { - todo!() - } -} - -impl StreamPlanRef for PlanBase { - fn distribution(&self) -> &Distribution { - &self.dist - } - - fn append_only(&self) -> bool { - self.append_only - } - - fn emit_on_window_close(&self) -> bool { - self.emit_on_window_close - } -} - -impl StreamPlanRef for PlanRef { - fn distribution(&self) -> &Distribution { - &self.0.dist - } - - fn append_only(&self) -> bool { - self.0.append_only - } - - fn emit_on_window_close(&self) -> bool { - self.0.emit_on_window_close - } -} - -/// Implements [`generic::Join`] with delta join. It requires its two -/// inputs to be indexes. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct DeltaJoin { - pub core: generic::Join, - - /// The join condition must be equivalent to `logical.on`, but separated into equal and - /// non-equal parts to facilitate execution later - pub eq_join_predicate: EqJoinPredicate, -} -impl_plan_tree_node_v2_for_stream_binary_node_with_core_delegating!(DeltaJoin, core, left, right); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct DynamicFilter { - pub core: generic::DynamicFilter, -} -impl_plan_tree_node_v2_for_stream_binary_node_with_core_delegating!( - DynamicFilter, - core, - left, - right -); -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Exchange { - pub dist: Distribution, - pub input: PlanRef, -} -impl_plan_tree_node_v2_for_stream_unary_node!(Exchange, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Expand { - pub core: generic::Expand, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(Expand, core, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Filter { - pub core: generic::Filter, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(Filter, core, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct SimpleAgg { - pub core: generic::Agg, - /// The index of `count(*)` in `agg_calls`. 
- row_count_idx: usize, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(SimpleAgg, core, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct GroupTopN { - pub core: generic::TopN, - /// an optional column index which is the vnode of each row computed by the input's consistent - /// hash distribution - pub vnode_col_idx: Option, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(GroupTopN, core, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct HashAgg { - pub core: generic::Agg, - /// An optional column index which is the vnode of each row computed by the input's consistent - /// hash distribution. - vnode_col_idx: Option, - /// The index of `count(*)` in `agg_calls`. - row_count_idx: usize, - /// Whether to emit output only when the window is closed by watermark. - emit_on_window_close: bool, - /// The watermark column that Emit-On-Window-Close behavior is based on. - window_col_idx: Option, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(HashAgg, core, input); - -/// Implements [`generic::Join`] with hash table. It builds a hash table -/// from inner (right-side) relation and probes with data from outer (left-side) relation to -/// get output rows. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct HashJoin { - pub core: generic::Join, - - /// The join condition must be equivalent to `logical.on`, but separated into equal and - /// non-equal parts to facilitate execution later - pub eq_join_predicate: EqJoinPredicate, - - /// Whether can optimize for append-only stream. - /// It is true if input of both side is append-only - pub is_append_only: bool, -} -impl_plan_tree_node_v2_for_stream_binary_node_with_core_delegating!(HashJoin, core, left, right); - -impl HashJoin { - /// Return hash join internal table catalog and degree table catalog. 
- pub fn infer_internal_and_degree_table_catalog( - input: &impl StreamPlanRef, - join_key_indices: Vec, - dk_indices_in_jk: Vec, - ) -> (TableCatalog, TableCatalog, Vec) { - let schema = input.schema(); - - let internal_table_dist_keys = dk_indices_in_jk - .iter() - .map(|idx| join_key_indices[*idx]) - .collect_vec(); - - let degree_table_dist_keys = dk_indices_in_jk.clone(); - - // The pk of hash join internal and degree table should be join_key + input_pk. - let join_key_len = join_key_indices.len(); - let mut pk_indices = join_key_indices; - - // dedup the pk in dist key.. - let mut deduped_input_pk_indices = vec![]; - for input_pk_idx in input.logical_pk() { - if !pk_indices.contains(input_pk_idx) - && !deduped_input_pk_indices.contains(input_pk_idx) - { - deduped_input_pk_indices.push(*input_pk_idx); - } - } - - pk_indices.extend(deduped_input_pk_indices.clone()); - - // Build internal table - let mut internal_table_catalog_builder = - TableCatalogBuilder::new(input.ctx().with_options().internal_table_subset()); - let internal_columns_fields = schema.fields().to_vec(); - - internal_columns_fields.iter().for_each(|field| { - internal_table_catalog_builder.add_column(field); - }); - pk_indices.iter().for_each(|idx| { - internal_table_catalog_builder.add_order_column(*idx, OrderType::ascending()) - }); - - // Build degree table. 
- let mut degree_table_catalog_builder = - TableCatalogBuilder::new(input.ctx().with_options().internal_table_subset()); - - let degree_column_field = Field::with_name(DataType::Int64, "_degree"); - - pk_indices.iter().enumerate().for_each(|(order_idx, idx)| { - degree_table_catalog_builder.add_column(&internal_columns_fields[*idx]); - degree_table_catalog_builder.add_order_column(order_idx, OrderType::ascending()); - }); - degree_table_catalog_builder.add_column(°ree_column_field); - degree_table_catalog_builder - .set_value_indices(vec![degree_table_catalog_builder.columns().len() - 1]); - - internal_table_catalog_builder.set_dist_key_in_pk(dk_indices_in_jk.clone()); - degree_table_catalog_builder.set_dist_key_in_pk(dk_indices_in_jk); - - ( - internal_table_catalog_builder.build(internal_table_dist_keys, join_key_len), - degree_table_catalog_builder.build(degree_table_dist_keys, join_key_len), - deduped_input_pk_indices, - ) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct HopWindow { - pub core: generic::HopWindow, - window_start_exprs: Vec, - window_end_exprs: Vec, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(HopWindow, core, input); - -/// [`IndexScan`] is a virtual plan node to represent a stream table scan. It will be converted -/// to chain + merge node (for upstream materialize) + batch table scan when converting to `MView` -/// creation request. Compared with [`TableScan`], it will reorder columns, and the chain node -/// doesn't allow rearrange. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct IndexScan { - pub core: generic::Scan, - pub batch_plan_id: PlanNodeId, -} -impl_plan_tree_node_v2_for_stream_leaf_node!(IndexScan); - -/// Stateless simple agg. +/// A subtrait of [`PhysicalPlanRef`] for stream plans. /// -/// Should only be used for stateless agg, including `sum`, `count` and *append-only* `min`/`max`. 
+/// Due to the lack of refactoring, all plan nodes currently implement this trait +/// through [`super::PlanBase`]. One may still use this trait as a bound for +/// accessing a stream plan, in contrast to [`GenericPlanRef`] or +/// [`PhysicalPlanRef`]. /// -/// The output of `StatelessSimpleAgg` doesn't have pk columns, so the result can only be used by -/// `SimpleAgg` with `ManagedValueState`s. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct StatelessSimpleAgg { - pub core: generic::Agg, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(StatelessSimpleAgg, core, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Materialize { - /// Child of Materialize plan - pub input: PlanRef, - pub table: TableCatalog, -} -impl_plan_tree_node_v2_for_stream_unary_node!(Materialize, input); - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct ProjectSet { - pub core: generic::ProjectSet, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(ProjectSet, core, input); - -/// `Project` implements [`super::LogicalProject`] to evaluate specified expressions on input -/// rows. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Project { - pub core: generic::Project, - watermark_derivations: Vec<(usize, usize)>, - nondecreasing_exprs: Vec, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(Project, core, input); - -/// [`Sink`] represents a table/connector sink at the very end of the graph. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Sink { - pub input: PlanRef, - pub sink_desc: SinkDesc, -} -impl_plan_tree_node_v2_for_stream_unary_node!(Sink, input); -/// [`Source`] represents a table/connector source at the very beginning of the graph. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Source { - pub core: generic::Source, -} -impl_plan_tree_node_v2_for_stream_leaf_node!(Source); - -/// `TableScan` is a virtual plan node to represent a stream table scan. 
It will be converted -/// to chain + merge node (for upstream materialize) + batch table scan when converting to `MView` -/// creation request. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct TableScan { - pub core: generic::Scan, - pub batch_plan_id: PlanNodeId, -} -impl_plan_tree_node_v2_for_stream_leaf_node!(TableScan); - -/// `TopN` implements [`super::LogicalTopN`] to find the top N elements with a heap -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct TopN { - pub core: generic::TopN, -} -impl_plan_tree_node_v2_for_stream_unary_node_with_core_delegating!(TopN, core, input); - -#[derive(Clone, Debug, Educe)] -#[educe(PartialEq, Eq, Hash)] -pub struct PlanBase { - #[educe(PartialEq(ignore))] - #[educe(Hash(ignore))] - pub id: PlanNodeId, - #[educe(PartialEq(ignore))] - #[educe(Hash(ignore))] - pub ctx: OptimizerContextRef, - pub schema: Schema, - pub logical_pk: Vec, - #[educe(PartialEq(ignore))] - #[educe(Hash(ignore))] - pub dist: Distribution, - pub append_only: bool, - pub emit_on_window_close: bool, -} - -impl_node!( - PlanBase, - Exchange, - DynamicFilter, - DeltaJoin, - Expand, - Filter, - SimpleAgg, - GroupTopN, - HashAgg, - HashJoin, - HopWindow, - IndexScan, - StatelessSimpleAgg, - Materialize, - ProjectSet, - Project, - Sink, - Source, - TableScan, - TopN -); - -use pb_node::PbNodeBody; -#[allow(dead_code)] -pub fn to_stream_prost_body( - (base, core): &PlanOwned, - state: &mut BuildFragmentGraphState, -) -> PbNodeBody { - use pb::*; - match core { - Node::TableScan(_) => todo!(), - Node::IndexScan(_) => todo!(), - // ^ need standalone implementations - Node::Exchange(_) => PbNodeBody::Exchange(ExchangeNode { - strategy: Some(DispatchStrategy { - r#type: match &base.dist { - Distribution::HashShard(_) => DispatcherType::Hash, - Distribution::Single => DispatcherType::Simple, - Distribution::Broadcast => DispatcherType::Broadcast, - _ => panic!("Do not allow Any or AnyShard in serialization process"), - } as i32, - dist_key_indices: 
match &base.dist { - Distribution::HashShard(keys) => keys.iter().map(|&num| num as u32).collect(), - _ => vec![], - }, - output_indices: (0..base.schema().len() as u32).collect(), - }), - }), - Node::DynamicFilter(me) => { - use generic::dynamic_filter::*; - let me = &me.core; - let condition = me - .predicate() - .as_expr_unless_true() - .map(|x| x.to_expr_proto()); - let left_table = infer_left_internal_table_catalog(base, me.left_index()) - .with_id(state.gen_table_id_wrapped()); - let right_table = infer_right_internal_table_catalog(&me.right().0) - .with_id(state.gen_table_id_wrapped()); - PbNodeBody::DynamicFilter(DynamicFilterNode { - left_key: me.left_index() as u32, - condition, - left_table: Some(left_table.to_internal_table_prost()), - right_table: Some(right_table.to_internal_table_prost()), - }) - } - Node::DeltaJoin(me) => { - let (_, left_node) = &*me.core.left; - let (_, right_node) = &*me.core.right; - fn cast(node: &Node) -> &IndexScan { - match node { - Node::IndexScan(scan) => scan, - _ => unreachable!(), - } - } - let left_table = cast(left_node); - let right_table = cast(right_node); - let left_table_desc = &*left_table.core.table_desc; - let right_table_desc = &*right_table.core.table_desc; - - // TODO: add a separate delta join node in proto, or move fragmenter to frontend so that - // we don't need an intermediate representation. 
- PbNodeBody::DeltaIndexJoin(DeltaIndexJoinNode { - join_type: me.core.join_type as i32, - left_key: me - .eq_join_predicate - .left_eq_indexes() - .iter() - .map(|v| *v as i32) - .collect(), - right_key: me - .eq_join_predicate - .right_eq_indexes() - .iter() - .map(|v| *v as i32) - .collect(), - condition: me - .eq_join_predicate - .other_cond() - .as_expr_unless_true() - .map(|x| x.to_expr_proto()), - left_table_id: left_table_desc.table_id.table_id(), - right_table_id: right_table_desc.table_id.table_id(), - left_info: Some(ArrangementInfo { - arrange_key_orders: left_table_desc.arrange_key_orders_protobuf(), - column_descs: left_table - .core - .column_descs() - .iter() - .map(ColumnDesc::to_protobuf) - .collect(), - table_desc: Some(left_table_desc.to_protobuf()), - }), - right_info: Some(ArrangementInfo { - arrange_key_orders: right_table_desc.arrange_key_orders_protobuf(), - column_descs: right_table - .core - .column_descs() - .iter() - .map(ColumnDesc::to_protobuf) - .collect(), - table_desc: Some(right_table_desc.to_protobuf()), - }), - output_indices: me.core.output_indices.iter().map(|&x| x as u32).collect(), - }) - } - Node::Expand(me) => { - use pb::expand_node::Subset; - - let me = &me.core; - PbNodeBody::Expand(ExpandNode { - column_subsets: me - .column_subsets - .iter() - .map(|subset| { - let column_indices = subset.iter().map(|&key| key as u32).collect(); - Subset { column_indices } - }) - .collect(), - }) - } - Node::Filter(me) => { - let me = &me.core; - PbNodeBody::Filter(FilterNode { - search_condition: Some(ExprImpl::from(me.predicate.clone()).to_expr_proto()), - }) - } - Node::SimpleAgg(me) => { - let result_table = me.core.infer_result_table(base, None, None); - let agg_states = me.core.infer_stream_agg_state(base, None, None); - let distinct_dedup_tables = me.core.infer_distinct_dedup_tables(base, None, None); - - PbNodeBody::SimpleAgg(SimpleAggNode { - agg_calls: me - .core - .agg_calls - .iter() - .map(PlanAggCall::to_protobuf) - 
.collect(), - row_count_index: me.row_count_idx as u32, - distribution_key: base - .dist - .dist_column_indices() - .iter() - .map(|&idx| idx as u32) - .collect(), - is_append_only: me.core.input.0.append_only, - agg_call_states: agg_states - .into_iter() - .map(|s| s.into_prost(state)) - .collect(), - result_table: Some( - result_table - .with_id(state.gen_table_id_wrapped()) - .to_internal_table_prost(), - ), - distinct_dedup_tables: distinct_dedup_tables - .into_iter() - .map(|(key_idx, table)| (key_idx as u32, table.to_internal_table_prost())) - .collect(), - }) - } - Node::GroupTopN(me) => { - let input = &me.core.input.0; - let table = me - .core - .infer_internal_table_catalog( - input.schema(), - input.ctx(), - input.logical_pk(), - me.vnode_col_idx, - ) - .with_id(state.gen_table_id_wrapped()); - let group_topn_node = GroupTopNNode { - limit: me.core.limit_attr.limit(), - offset: me.core.offset, - with_ties: me.core.limit_attr.with_ties(), - group_key: me.core.group_key.iter().map(|idx| *idx as u32).collect(), - table: Some(table.to_internal_table_prost()), - order_by: me.core.order.to_protobuf(), - }; - - PbNodeBody::GroupTopN(group_topn_node) - } - Node::HashAgg(me) => { - let result_table = - me.core - .infer_result_table(base, me.vnode_col_idx, me.window_col_idx); - let agg_states = - me.core - .infer_stream_agg_state(base, me.vnode_col_idx, me.window_col_idx); - let distinct_dedup_tables = - me.core - .infer_distinct_dedup_tables(base, me.vnode_col_idx, me.window_col_idx); - - PbNodeBody::HashAgg(HashAggNode { - group_key: me.core.group_key.indices().map(|idx| idx as u32).collect(), - agg_calls: me - .core - .agg_calls - .iter() - .map(PlanAggCall::to_protobuf) - .collect(), - row_count_index: me.row_count_idx as u32, - is_append_only: me.core.input.0.append_only, - agg_call_states: agg_states - .into_iter() - .map(|s| s.into_prost(state)) - .collect(), - result_table: Some( - result_table - .with_id(state.gen_table_id_wrapped()) - 
.to_internal_table_prost(), - ), - distinct_dedup_tables: distinct_dedup_tables - .into_iter() - .map(|(key_idx, table)| (key_idx as u32, table.to_internal_table_prost())) - .collect(), - emit_on_window_close: me.emit_on_window_close(), - }) - } - Node::HashJoin(_) => { - unreachable!(); - } - Node::HopWindow(me) => { - let window_start_exprs = me - .window_start_exprs - .clone() - .iter() - .map(|x| x.to_expr_proto()) - .collect(); - let window_end_exprs = me - .window_end_exprs - .clone() - .iter() - .map(|x| x.to_expr_proto()) - .collect(); - let me = &me.core; - PbNodeBody::HopWindow(HopWindowNode { - time_col: me.time_col.index() as _, - window_slide: Some(me.window_slide.into()), - window_size: Some(me.window_size.into()), - output_indices: me.output_indices.iter().map(|&x| x as u32).collect(), - window_start_exprs, - window_end_exprs, - }) - } - Node::StatelessSimpleAgg(me) => { - let me = &me.core; - PbNodeBody::StatelessSimpleAgg(SimpleAggNode { - agg_calls: me.agg_calls.iter().map(PlanAggCall::to_protobuf).collect(), - row_count_index: u32::MAX, // this is not used - distribution_key: base - .dist - .dist_column_indices() - .iter() - .map(|&idx| idx as u32) - .collect(), - agg_call_states: vec![], - result_table: None, - is_append_only: me.input.0.append_only, - distinct_dedup_tables: Default::default(), - }) - } - Node::Materialize(me) => { - PbNodeBody::Materialize(MaterializeNode { - // We don't need table id for materialize node in frontend. The id will be generated - // on meta catalog service. 
- table_id: 0, - column_orders: me.table.pk().iter().map(ColumnOrder::to_protobuf).collect(), - table: Some(me.table.to_internal_table_prost()), - }) - } - Node::ProjectSet(_) => { - unreachable!() - } - Node::Project(me) => PbNodeBody::Project(ProjectNode { - select_list: me.core.exprs.iter().map(|x| x.to_expr_proto()).collect(), - watermark_input_cols: me - .watermark_derivations - .iter() - .map(|(x, _)| *x as u32) - .collect(), - watermark_output_cols: me - .watermark_derivations - .iter() - .map(|(_, y)| *y as u32) - .collect(), - nondecreasing_exprs: me.nondecreasing_exprs.iter().map(|i| *i as u32).collect(), - }), - Node::Sink(me) => PbNodeBody::Sink(SinkNode { - sink_desc: Some(me.sink_desc.to_proto()), - table: None, // TODO: Refactor sink to have a generic core. - log_store_type: SinkLogStoreType::InMemoryLogStore as i32, - }), - Node::Source(me) => { - let me = &me.core.catalog; - let source_inner = me.as_ref().map(|me| StreamSource { - source_id: me.id, - source_name: me.name.clone(), - state_table: Some( - generic::Source::infer_internal_table_catalog() - .with_id(state.gen_table_id_wrapped()) - .to_internal_table_prost(), - ), - info: Some(me.info.clone()), - row_id_index: me.row_id_index.map(|index| index as _), - columns: me.columns.iter().map(|c| c.to_protobuf()).collect(), - properties: me.properties.clone().into_iter().collect(), - }); - PbNodeBody::Source(SourceNode { source_inner }) - } - Node::TopN(me) => { - let input = &me.core.input.0; - let me = &me.core; - let topn_node = TopNNode { - limit: me.limit_attr.limit(), - offset: me.offset, - with_ties: me.limit_attr.with_ties(), - table: Some( - me.infer_internal_table_catalog( - input.schema(), - input.ctx(), - input.logical_pk(), - None, - ) - .with_id(state.gen_table_id_wrapped()) - .to_internal_table_prost(), - ), - order_by: me.order.to_protobuf(), - }; - if me.input.0.append_only { - PbNodeBody::AppendOnlyTopN(topn_node) - } else { - PbNodeBody::TopN(topn_node) - } - } - } +/// 
[`GenericPlanRef`]: super::generic::GenericPlanRef +pub trait StreamPlanRef: PhysicalPlanRef { + fn append_only(&self) -> bool; + fn emit_on_window_close(&self) -> bool; + fn watermark_columns(&self) -> &FixedBitSet; } diff --git a/src/frontend/src/optimizer/plan_node/stream_dedup.rs b/src/frontend/src/optimizer/plan_node/stream_dedup.rs index 44acf722eae6b..51b5e589e886e 100644 --- a/src/frontend/src/optimizer/plan_node/stream_dedup.rs +++ b/src/frontend/src/optimizer/plan_node/stream_dedup.rs @@ -17,10 +17,10 @@ use risingwave_common::util::sort_util::OrderType; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::DedupNode; -use super::generic::{self, GenericPlanNode, GenericPlanRef}; +use super::generic::{self, GenericPlanNode, GenericPlanRef, PhysicalPlanRef}; +use super::stream::StreamPlanRef; use super::utils::{impl_distill_by_unit, TableCatalogBuilder}; use super::{ExprRewritable, PlanBase, PlanTreeNodeUnary, StreamNode}; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::PlanRef; use crate::stream_fragmenter::BuildFragmentGraphState; use crate::TableCatalog; @@ -28,27 +28,27 @@ use crate::TableCatalog; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamDedup { pub base: PlanBase, - logical: generic::Dedup, + core: generic::Dedup, } impl StreamDedup { - pub fn new(logical: generic::Dedup) -> Self { - let input = logical.input.clone(); + pub fn new(core: generic::Dedup) -> Self { + let input = core.input.clone(); // A dedup operator must be append-only. 
assert!(input.append_only()); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, input.distribution().clone(), true, input.emit_on_window_close(), input.watermark_columns().clone(), ); - StreamDedup { base, logical } + StreamDedup { base, core } } pub fn infer_internal_table_catalog(&self) -> TableCatalog { - let schema = self.logical.schema(); + let schema = self.core.schema(); let mut builder = TableCatalogBuilder::new(self.base.ctx().with_options().internal_table_subset()); @@ -56,7 +56,7 @@ impl StreamDedup { builder.add_column(field); }); - self.logical.dedup_cols.iter().for_each(|idx| { + self.core.dedup_cols.iter().for_each(|idx| { builder.add_order_column(*idx, OrderType::ascending()); }); @@ -70,17 +70,17 @@ impl StreamDedup { } // assert!(self.base.append_only()); -impl_distill_by_unit!(StreamDedup, logical, "StreamAppendOnlyDedup"); +impl_distill_by_unit!(StreamDedup, core, "StreamAppendOnlyDedup"); impl PlanTreeNodeUnary for StreamDedup { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -94,7 +94,7 @@ impl StreamNode for StreamDedup { PbNodeBody::AppendOnlyDedup(DedupNode { state_table: Some(table_catalog.to_internal_table_prost()), dedup_column_indices: self - .logical + .core .dedup_cols .iter() .map(|idx| *idx as _) diff --git a/src/frontend/src/optimizer/plan_node/stream_delta_join.rs b/src/frontend/src/optimizer/plan_node/stream_delta_join.rs index ede1ae68b0825..bb18f9cffdf0f 100644 --- a/src/frontend/src/optimizer/plan_node/stream_delta_join.rs +++ b/src/frontend/src/optimizer/plan_node/stream_delta_join.rs @@ -20,11 +20,10 @@ use risingwave_pb::plan_common::JoinType; use risingwave_pb::stream_plan::stream_node::NodeBody; use 
risingwave_pb::stream_plan::{ArrangementInfo, DeltaIndexJoinNode}; -use super::generic::{self}; +use super::generic::{self, GenericPlanRef}; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeBinary, StreamNode}; use crate::expr::{Expr, ExprRewriter}; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::utils::IndicesDisplay; use crate::optimizer::plan_node::{EqJoinPredicate, EqJoinPredicateDisplay}; use crate::optimizer::property::Distribution; @@ -36,7 +35,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamDeltaJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, /// The join condition must be equivalent to `logical.on`, but separated into equal and /// non-equal parts to facilitate execution later @@ -44,10 +43,10 @@ pub struct StreamDeltaJoin { } impl StreamDeltaJoin { - pub fn new(logical: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { + pub fn new(core: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { // Inner join won't change the append-only behavior of the stream. The rest might. 
- let append_only = match logical.join_type { - JoinType::Inner => logical.left.append_only() && logical.right.append_only(), + let append_only = match core.join_type { + JoinType::Inner => core.left.append_only() && core.right.append_only(), _ => todo!("delta join only supports inner join for now"), }; if eq_join_predicate.has_non_eq() { @@ -58,18 +57,18 @@ impl StreamDeltaJoin { let dist = Distribution::SomeShard; let watermark_columns = { - let from_left = logical + let from_left = core .l2i_col_mapping() - .rewrite_bitset(logical.left.watermark_columns()); - let from_right = logical + .rewrite_bitset(core.left.watermark_columns()); + let from_right = core .r2i_col_mapping() - .rewrite_bitset(logical.right.watermark_columns()); + .rewrite_bitset(core.right.watermark_columns()); let watermark_columns = from_left.bitand(&from_right); - logical.i2o_col_mapping().rewrite_bitset(&watermark_columns) + core.i2o_col_mapping().rewrite_bitset(&watermark_columns) }; // TODO: derive from input - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, append_only, false, // TODO(rc): derive EOWC property from input @@ -78,7 +77,7 @@ impl StreamDeltaJoin { Self { base, - logical, + core, eq_join_predicate, } } @@ -91,11 +90,11 @@ impl StreamDeltaJoin { impl Distill for StreamDeltaJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&EqJoinPredicateDisplay { @@ -105,7 +104,7 @@ impl Distill for StreamDeltaJoin { )); if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + 
let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } @@ -115,18 +114,18 @@ impl Distill for StreamDeltaJoin { impl PlanTreeNodeBinary for StreamDeltaJoin { fn left(&self) -> PlanRef { - self.logical.left.clone() + self.core.left.clone() } fn right(&self) -> PlanRef { - self.logical.right.clone() + self.core.right.clone() } fn clone_with_left_right(&self, left: PlanRef, right: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = left; - logical.right = right; - Self::new(logical, self.eq_join_predicate.clone()) + let mut core = self.core.clone(); + core.left = left; + core.right = right; + Self::new(core, self.eq_join_predicate.clone()) } } @@ -138,13 +137,13 @@ impl StreamNode for StreamDeltaJoin { let right = self.right(); let left_table = if let Some(stream_table_scan) = left.as_stream_table_scan() { - stream_table_scan.logical() + stream_table_scan.core() } else { unreachable!(); }; let left_table_desc = &*left_table.table_desc; let right_table = if let Some(stream_table_scan) = right.as_stream_table_scan() { - stream_table_scan.logical() + stream_table_scan.core() } else { unreachable!(); }; @@ -154,7 +153,7 @@ impl StreamNode for StreamDeltaJoin { // don't need an intermediate representation. 
let eq_join_predicate = &self.eq_join_predicate; NodeBody::DeltaIndexJoin(DeltaIndexJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, left_key: eq_join_predicate .left_eq_indexes() .iter() @@ -193,12 +192,7 @@ impl StreamNode for StreamDeltaJoin { .collect(), table_desc: Some(right_table_desc.to_protobuf()), }), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), }) } } @@ -209,8 +203,8 @@ impl ExprRewritable for StreamDeltaJoin { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.eq_join_predicate.rewrite_exprs(r)).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.eq_join_predicate.rewrite_exprs(r)).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_derive.rs b/src/frontend/src/optimizer/plan_node/stream_derive.rs deleted file mode 100644 index 404ca04a39fab..0000000000000 --- a/src/frontend/src/optimizer/plan_node/stream_derive.rs +++ /dev/null @@ -1,630 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use risingwave_common::catalog::Schema; - -use super::generic::GenericPlanNode; -use super::stream::*; -use crate::optimizer::optimizer_context::OptimizerContextRef; -use crate::optimizer::property::{Distribution, FunctionalDependencySet}; -use crate::utils::ColIndexMappingRewriteExt; - -impl GenericPlanNode for DynamicFilter { - fn schema(&self) -> Schema { - todo!("new plan node derivation") - } - - fn logical_pk(&self) -> Option> { - todo!("new plan node derivation") - } - - fn ctx(&self) -> OptimizerContextRef { - todo!("new plan node derivation") - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - todo!("new plan node derivation") - } -} -impl StreamPlanNode for DynamicFilter { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for Exchange { - fn schema(&self) -> Schema { - todo!("new plan node derivation") - } - - fn logical_pk(&self) -> Option> { - todo!("new plan node derivation") - } - - fn ctx(&self) -> OptimizerContextRef { - todo!("new plan node derivation") - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - todo!("new plan node derivation") - } -} - -impl StreamPlanNode for Exchange { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for DeltaJoin { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for DeltaJoin { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - 
todo!() - } -} - -impl GenericPlanNode for Expand { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for Expand { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for Filter { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for Filter { - fn distribution(&self) -> Distribution { - self.core.input.distribution().clone() - } - - fn append_only(&self) -> bool { - self.core.input.append_only() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for SimpleAgg { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for SimpleAgg { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for GroupTopN { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - 
self.core.functional_dependency() - } -} - -impl StreamPlanNode for GroupTopN { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for HashAgg { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for HashAgg { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for HashJoin { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for HashJoin { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for HopWindow { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for HopWindow { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for IndexScan { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> 
Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for IndexScan { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for StatelessSimpleAgg { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for StatelessSimpleAgg { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for Materialize { - fn schema(&self) -> Schema { - todo!("new plan node derivation") - } - - fn logical_pk(&self) -> Option> { - todo!("new plan node derivation") - } - - fn ctx(&self) -> OptimizerContextRef { - todo!("new plan node derivation") - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - todo!("new plan node derivation") - } -} - -impl StreamPlanNode for Materialize { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for ProjectSet { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for ProjectSet { - fn distribution(&self) -> Distribution { - 
todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for Project { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for Project { - fn distribution(&self) -> Distribution { - self.core - .i2o_col_mapping() - .rewrite_provided_distribution(self.core.input.distribution()) - } - - fn append_only(&self) -> bool { - self.core.input.append_only() - } - - fn emit_on_window_close(&self) -> bool { - self.core.input.emit_on_window_close() - } -} - -impl GenericPlanNode for Sink { - fn schema(&self) -> Schema { - todo!("new plan node derivation") - } - - fn logical_pk(&self) -> Option> { - todo!("new plan node derivation") - } - - fn ctx(&self) -> OptimizerContextRef { - todo!("new plan node derivation") - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - todo!("new plan node derivation") - } -} - -impl StreamPlanNode for Sink { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for Source { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for Source { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for TableScan { - fn schema(&self) -> Schema { - 
self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for TableScan { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} - -impl GenericPlanNode for TopN { - fn schema(&self) -> Schema { - self.core.schema() - } - - fn logical_pk(&self) -> Option> { - self.core.logical_pk() - } - - fn ctx(&self) -> OptimizerContextRef { - self.core.ctx() - } - - fn functional_dependency(&self) -> FunctionalDependencySet { - self.core.functional_dependency() - } -} - -impl StreamPlanNode for TopN { - fn distribution(&self) -> Distribution { - todo!() - } - - fn append_only(&self) -> bool { - todo!() - } - - fn emit_on_window_close(&self) -> bool { - todo!() - } -} diff --git a/src/frontend/src/optimizer/plan_node/stream_dml.rs b/src/frontend/src/optimizer/plan_node/stream_dml.rs index c576d5e2d83d3..9b000974786e4 100644 --- a/src/frontend/src/optimizer/plan_node/stream_dml.rs +++ b/src/frontend/src/optimizer/plan_node/stream_dml.rs @@ -17,6 +17,7 @@ use pretty_xmlish::{Pretty, XmlNode}; use risingwave_common::catalog::{ColumnDesc, INITIAL_TABLE_VERSION_ID}; use risingwave_pb::stream_plan::stream_node::PbNodeBody; +use super::stream::StreamPlanRef; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -33,7 +34,7 @@ impl StreamDml { let base = PlanBase::new_stream( input.ctx(), input.schema().clone(), - input.logical_pk().to_vec(), + input.stream_key().map(|v| v.to_vec()), input.functional_dependency().clone(), input.distribution().clone(), append_only, diff --git 
a/src/frontend/src/optimizer/plan_node/stream_dynamic_filter.rs b/src/frontend/src/optimizer/plan_node/stream_dynamic_filter.rs index e1ca18da937e9..a4b74f37208e7 100644 --- a/src/frontend/src/optimizer/plan_node/stream_dynamic_filter.rs +++ b/src/frontend/src/optimizer/plan_node/stream_dynamic_filter.rs @@ -17,7 +17,8 @@ pub use risingwave_pb::expr::expr_node::Type as ExprType; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::DynamicFilterNode; -use super::generic::DynamicFilter; +use super::generic::{DynamicFilter, GenericPlanRef}; +use super::stream::StreamPlanRef; use super::utils::{childless_record, column_names_pretty, watermark_pretty, Distill}; use super::{generic, ExprRewritable}; use crate::expr::{Expr, ExprImpl}; @@ -37,7 +38,7 @@ impl StreamDynamicFilter { let watermark_columns = core.watermark_columns(core.right().watermark_columns()[0]); // TODO: derive from input - let base = PlanBase::new_stream_with_logical( + let base = PlanBase::new_stream_with_core( &core, core.left().distribution().clone(), false, /* we can have a new abstraction for append only and monotonically increasing @@ -78,11 +79,11 @@ impl StreamDynamicFilter { impl Distill for StreamDynamicFilter { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let pred = self.core.pretty_field(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); vec.push(("predicate", pred)); - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { vec.push(("output_watermarks", ow)); } vec.push(("output", column_names_pretty(self.schema()))); diff --git a/src/frontend/src/optimizer/plan_node/stream_eowc_over_window.rs b/src/frontend/src/optimizer/plan_node/stream_eowc_over_window.rs index dea3d0eb49889..d8c5a9635ce59 100644 --- 
a/src/frontend/src/optimizer/plan_node/stream_eowc_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/stream_eowc_over_window.rs @@ -18,51 +18,50 @@ use fixedbitset::FixedBitSet; use risingwave_common::util::sort_util::OrderType; use risingwave_pb::stream_plan::stream_node::PbNodeBody; -use super::generic::{self, PlanWindowFunction}; +use super::generic::{self, GenericPlanRef, PlanWindowFunction}; use super::utils::{impl_distill_by_unit, TableCatalogBuilder}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::stream_fragmenter::BuildFragmentGraphState; use crate::TableCatalog; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamEowcOverWindow { pub base: PlanBase, - logical: generic::OverWindow, + core: generic::OverWindow, } impl StreamEowcOverWindow { - pub fn new(logical: generic::OverWindow) -> Self { - assert!(logical.funcs_have_same_partition_and_order()); + pub fn new(core: generic::OverWindow) -> Self { + assert!(core.funcs_have_same_partition_and_order()); - let input = &logical.input; + let input = &core.input; assert!(input.append_only()); assert!(input.emit_on_window_close()); // Should order by a single watermark column. - let order_key = &logical.window_functions[0].order_by; + let order_key = &core.window_functions[0].order_by; assert_eq!(order_key.len(), 1); assert_eq!(order_key[0].order_type, OrderType::ascending()); let order_key_idx = order_key[0].column_index; - let input_watermark_cols = logical.input.watermark_columns(); + let input_watermark_cols = core.input.watermark_columns(); assert!(input_watermark_cols.contains(order_key_idx)); // `EowcOverWindowExecutor` cannot produce any watermark columns, because there may be some // ancient rows in some rarely updated partitions that are emitted at the end of time. 
- let watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let watermark_columns = FixedBitSet::with_capacity(core.output_len()); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, input.distribution().clone(), true, true, watermark_columns, ); - StreamEowcOverWindow { base, logical } + StreamEowcOverWindow { base, core } } fn window_functions(&self) -> &[PlanWindowFunction] { - &self.logical.window_functions + &self.core.window_functions } fn partition_key_indices(&self) -> Vec { @@ -80,7 +79,7 @@ impl StreamEowcOverWindow { fn infer_state_table(&self) -> TableCatalog { // The EOWC over window state table has the same schema as the input. - let in_fields = self.logical.input.schema().fields(); + let in_fields = self.core.input.schema().fields(); let mut tbl_builder = TableCatalogBuilder::new(self.ctx().with_options().internal_table_subset()); for field in in_fields { @@ -101,29 +100,29 @@ impl StreamEowcOverWindow { tbl_builder.add_order_column(order_key_index, OrderType::ascending()); order_cols.insert(order_key_index); } - for idx in self.logical.input.logical_pk() { + for idx in self.core.input.expect_stream_key() { if !order_cols.contains(idx) { tbl_builder.add_order_column(*idx, OrderType::ascending()); order_cols.insert(*idx); } } - let in_dist_key = self.logical.input.distribution().dist_column_indices(); + let in_dist_key = self.core.input.distribution().dist_column_indices(); tbl_builder.build(in_dist_key.to_vec(), read_prefix_len_hint) } } -impl_distill_by_unit!(StreamEowcOverWindow, logical, "StreamEowcOverWindow"); +impl_distill_by_unit!(StreamEowcOverWindow, core, "StreamEowcOverWindow"); impl PlanTreeNodeUnary for StreamEowcOverWindow { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut 
core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! { StreamEowcOverWindow } diff --git a/src/frontend/src/optimizer/plan_node/stream_exchange.rs b/src/frontend/src/optimizer/plan_node/stream_exchange.rs index b5da1804aee71..99e6c3c5161a1 100644 --- a/src/frontend/src/optimizer/plan_node/stream_exchange.rs +++ b/src/frontend/src/optimizer/plan_node/stream_exchange.rs @@ -16,6 +16,7 @@ use pretty_xmlish::{Pretty, XmlNode}; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::{DispatchStrategy, DispatcherType, ExchangeNode}; +use super::generic::{GenericPlanRef, PhysicalPlanRef}; use super::stream::StreamPlanRef; use super::utils::{childless_record, plan_node_name, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; @@ -37,7 +38,7 @@ impl StreamExchange { let base = PlanBase::new_stream( input.ctx(), input.schema().clone(), - input.logical_pk().to_vec(), + input.stream_key().map(|v| v.to_vec()), input.functional_dependency().clone(), dist, input.append_only(), @@ -53,12 +54,11 @@ impl StreamExchange { pub fn new_no_shuffle(input: PlanRef) -> Self { let ctx = input.ctx(); - let pk_indices = input.logical_pk().to_vec(); // Dispatch executor won't change the append-only behavior of the stream. 
let base = PlanBase::new_stream( ctx, input.schema().clone(), - pk_indices, + input.stream_key().map(|v| v.to_vec()), input.functional_dependency().clone(), input.distribution().clone(), input.append_only(), @@ -80,7 +80,7 @@ impl StreamExchange { impl Distill for StreamExchange { fn distill<'a>(&self) -> XmlNode<'a> { let distribution_display = DistributionDisplay { - distribution: &self.base.dist, + distribution: self.base.distribution(), input_schema: self.input.schema(), }; childless_record( @@ -119,13 +119,13 @@ impl StreamNode for StreamExchange { }) } else { Some(DispatchStrategy { - r#type: match &self.base.dist { + r#type: match &self.base.distribution() { Distribution::HashShard(_) => DispatcherType::Hash, Distribution::Single => DispatcherType::Simple, Distribution::Broadcast => DispatcherType::Broadcast, _ => panic!("Do not allow Any or AnyShard in serialization process"), } as i32, - dist_key_indices: match &self.base.dist { + dist_key_indices: match &self.base.distribution() { Distribution::HashShard(keys) => { keys.iter().map(|num| *num as u32).collect() } diff --git a/src/frontend/src/optimizer/plan_node/stream_expand.rs b/src/frontend/src/optimizer/plan_node/stream_expand.rs index 619bc98fc6406..5959b8d6be4d2 100644 --- a/src/frontend/src/optimizer/plan_node/stream_expand.rs +++ b/src/frontend/src/optimizer/plan_node/stream_expand.rs @@ -17,7 +17,6 @@ use risingwave_pb::stream_plan::expand_node::Subset; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::ExpandNode; -use super::stream::StreamPlanRef; use super::utils::impl_distill_by_unit; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::optimizer::property::Distribution; @@ -26,12 +25,12 @@ use crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamExpand { pub base: PlanBase, - logical: generic::Expand, + core: generic::Expand, } impl StreamExpand { - 
pub fn new(logical: generic::Expand) -> Self { - let input = logical.input.clone(); + pub fn new(core: generic::Expand) -> Self { + let input = core.input.clone(); let dist = match input.distribution() { Distribution::Single => Distribution::Single, @@ -41,7 +40,7 @@ impl StreamExpand { Distribution::Broadcast => unreachable!(), }; - let mut watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let mut watermark_columns = FixedBitSet::with_capacity(core.output_len()); watermark_columns.extend( input .watermark_columns() @@ -49,35 +48,35 @@ impl StreamExpand { .map(|idx| idx + input.schema().len()), ); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, input.append_only(), input.emit_on_window_close(), watermark_columns, ); - StreamExpand { base, logical } + StreamExpand { base, core } } pub fn column_subsets(&self) -> &[Vec] { - &self.logical.column_subsets + &self.core.column_subsets } } impl PlanTreeNodeUnary for StreamExpand { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! 
{ StreamExpand } -impl_distill_by_unit!(StreamExpand, logical, "StreamExpand"); +impl_distill_by_unit!(StreamExpand, core, "StreamExpand"); impl StreamNode for StreamExpand { fn to_stream_prost_body(&self, _state: &mut BuildFragmentGraphState) -> PbNodeBody { diff --git a/src/frontend/src/optimizer/plan_node/stream_filter.rs b/src/frontend/src/optimizer/plan_node/stream_filter.rs index 285a8086f5c81..0f000e6b8c0db 100644 --- a/src/frontend/src/optimizer/plan_node/stream_filter.rs +++ b/src/frontend/src/optimizer/plan_node/stream_filter.rs @@ -15,7 +15,6 @@ use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::FilterNode; -use super::stream::StreamPlanRef; use super::utils::impl_distill_by_unit; use super::{generic, ExprRewritable, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::{Expr, ExprImpl, ExprRewriter}; @@ -27,43 +26,43 @@ use crate::utils::Condition; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamFilter { pub base: PlanBase, - logical: generic::Filter, + core: generic::Filter, } impl StreamFilter { - pub fn new(logical: generic::Filter) -> Self { - let input = logical.input.clone(); + pub fn new(core: generic::Filter) -> Self { + let input = core.input.clone(); let dist = input.distribution().clone(); // Filter executor won't change the append-only behavior of the stream. 
- let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, input.append_only(), input.emit_on_window_close(), input.watermark_columns().clone(), ); - StreamFilter { base, logical } + StreamFilter { base, core } } pub fn predicate(&self) -> &Condition { - &self.logical.predicate + &self.core.predicate } } impl PlanTreeNodeUnary for StreamFilter { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! { StreamFilter } -impl_distill_by_unit!(StreamFilter, logical, "StreamFilter"); +impl_distill_by_unit!(StreamFilter, core, "StreamFilter"); impl StreamNode for StreamFilter { fn to_stream_prost_body(&self, _state: &mut BuildFragmentGraphState) -> PbNodeBody { @@ -79,8 +78,8 @@ impl ExprRewritable for StreamFilter { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_fs_fetch.rs b/src/frontend/src/optimizer/plan_node/stream_fs_fetch.rs new file mode 100644 index 0000000000000..95fd72e9f6aa0 --- /dev/null +++ b/src/frontend/src/optimizer/plan_node/stream_fs_fetch.rs @@ -0,0 +1,125 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::rc::Rc; + +use fixedbitset::FixedBitSet; +use itertools::Itertools; +use pretty_xmlish::{Pretty, XmlNode}; +use risingwave_pb::stream_plan::stream_node::NodeBody; +use risingwave_pb::stream_plan::{PbStreamFsFetch, StreamFsFetchNode}; + +use super::{PlanBase, PlanRef, PlanTreeNodeUnary}; +use crate::catalog::source_catalog::SourceCatalog; +use crate::optimizer::plan_node::generic::GenericPlanRef; +use crate::optimizer::plan_node::utils::{childless_record, Distill}; +use crate::optimizer::plan_node::{generic, ExprRewritable, StreamNode}; +use crate::optimizer::property::Distribution; +use crate::stream_fragmenter::BuildFragmentGraphState; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct StreamFsFetch { + pub base: PlanBase, + input: PlanRef, + core: generic::Source, +} + +impl PlanTreeNodeUnary for StreamFsFetch { + fn input(&self) -> PlanRef { + self.input.clone() + } + + fn clone_with_input(&self, input: PlanRef) -> Self { + Self::new(input, self.core.clone()) + } +} +impl_plan_tree_node_for_unary! 
{ StreamFsFetch } + +impl StreamFsFetch { + pub fn new(input: PlanRef, source: generic::Source) -> Self { + let base = PlanBase::new_stream_with_core( + &source, + Distribution::SomeShard, + source.catalog.as_ref().map_or(true, |s| s.append_only), + false, + FixedBitSet::with_capacity(source.column_catalog.len()), + ); + + Self { + base, + input, + core: source, + } + } + + fn get_columns(&self) -> Vec<&str> { + self.core + .column_catalog + .iter() + .map(|column| column.name()) + .collect() + } + + pub fn source_catalog(&self) -> Option> { + self.core.catalog.clone() + } +} + +impl Distill for StreamFsFetch { + fn distill<'a>(&self) -> XmlNode<'a> { + let columns = self + .get_columns() + .iter() + .map(|ele| Pretty::from(ele.to_string())) + .collect(); + let col = Pretty::Array(columns); + childless_record("StreamFsFetch", vec![("columns", col)]) + } +} + +impl ExprRewritable for StreamFsFetch {} + +impl StreamNode for StreamFsFetch { + fn to_stream_prost_body(&self, state: &mut BuildFragmentGraphState) -> NodeBody { + // `StreamFsFetch` is same as source in proto def, so the following code is the same as `StreamSource` + let source_catalog = self.source_catalog(); + let source_inner = source_catalog.map(|source_catalog| PbStreamFsFetch { + source_id: source_catalog.id, + source_name: source_catalog.name.clone(), + state_table: Some( + generic::Source::infer_internal_table_catalog() + .with_id(state.gen_table_id_wrapped()) + .to_internal_table_prost(), + ), + info: Some(source_catalog.info.clone()), + row_id_index: self.core.row_id_index.map(|index| index as _), + columns: self + .core + .column_catalog + .iter() + .map(|c| c.to_protobuf()) + .collect_vec(), + properties: source_catalog.properties.clone().into_iter().collect(), + rate_limit: self + .base + .ctx() + .session_ctx() + .config() + .get_streaming_rate_limit(), + }); + NodeBody::StreamFsFetch(StreamFsFetchNode { + node_inner: source_inner, + }) + } +} diff --git 
a/src/frontend/src/optimizer/plan_node/stream_group_topn.rs b/src/frontend/src/optimizer/plan_node/stream_group_topn.rs index 761807cba1ae7..3e8f3c00206c4 100644 --- a/src/frontend/src/optimizer/plan_node/stream_group_topn.rs +++ b/src/frontend/src/optimizer/plan_node/stream_group_topn.rs @@ -16,9 +16,11 @@ use fixedbitset::FixedBitSet; use pretty_xmlish::XmlNode; use risingwave_pb::stream_plan::stream_node::PbNodeBody; -use super::generic::{DistillUnit, TopNLimit}; +use super::generic::{DistillUnit, GenericPlanRef, TopNLimit}; +use super::stream::StreamPlanRef; use super::utils::{plan_node_name, watermark_pretty, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanTreeNodeUnary, StreamNode}; +use crate::optimizer::plan_node::generic::GenericPlanNode; use crate::optimizer::property::Order; use crate::stream_fragmenter::BuildFragmentGraphState; use crate::PlanRef; @@ -26,24 +28,24 @@ use crate::PlanRef; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamGroupTopN { pub base: PlanBase, - logical: generic::TopN, + core: generic::TopN, /// an optional column index which is the vnode of each row computed by the input's consistent /// hash distribution vnode_col_idx: Option, } impl StreamGroupTopN { - pub fn new(logical: generic::TopN, vnode_col_idx: Option) -> Self { - assert!(!logical.group_key.is_empty()); - assert!(logical.limit_attr.limit() > 0); - let input = &logical.input; + pub fn new(core: generic::TopN, vnode_col_idx: Option) -> Self { + assert!(!core.group_key.is_empty()); + assert!(core.limit_attr.limit() > 0); + let input = &core.input; let schema = input.schema().clone(); let watermark_columns = if input.append_only() { input.watermark_columns().clone() } else { let mut watermark_columns = FixedBitSet::with_capacity(schema.len()); - for &idx in &logical.group_key { + for &idx in &core.group_key { if input.watermark_columns().contains(idx) { watermark_columns.insert(idx); } @@ -51,8 +53,22 @@ impl StreamGroupTopN { watermark_columns }; - 
let base = PlanBase::new_stream_with_logical( - &logical, + let mut stream_key = core + .stream_key() + .expect("logical node should have stream key here"); + if let Some(vnode_col_idx) = vnode_col_idx && stream_key.len() > 1 { + // The output stream key of `GroupTopN` is a union of group key and input stream key, + // while vnode is calculated from a subset of input stream key. So we can safely remove + // the vnode column from output stream key. While at meanwhile we cannot leave the stream key + // as empty, so we only remove it when stream key length is > 1. + stream_key.remove(stream_key.iter().position(|i| *i == vnode_col_idx).unwrap()); + } + + let base = PlanBase::new_stream( + core.ctx(), + core.schema(), + Some(stream_key), + core.functional_dependency(), input.distribution().clone(), false, // TODO: https://github.com/risingwavelabs/risingwave/issues/8348 @@ -61,25 +77,25 @@ impl StreamGroupTopN { ); StreamGroupTopN { base, - logical, + core, vnode_col_idx, } } pub fn limit_attr(&self) -> TopNLimit { - self.logical.limit_attr + self.core.limit_attr } pub fn offset(&self) -> u64 { - self.logical.offset + self.core.offset } pub fn topn_order(&self) -> &Order { - &self.logical.order + &self.core.order } pub fn group_key(&self) -> &[usize] { - &self.logical.group_key + &self.core.group_key } } @@ -89,11 +105,11 @@ impl StreamNode for StreamGroupTopN { let input = self.input(); let table = self - .logical + .core .infer_internal_table_catalog( input.schema(), input.ctx(), - input.logical_pk(), + input.expect_stream_key(), self.vnode_col_idx, ) .with_id(state.gen_table_id_wrapped()); @@ -119,8 +135,8 @@ impl Distill for StreamGroupTopN { let name = plan_node_name!("StreamGroupTopN", { "append_only", self.input().append_only() }, ); - let mut node = self.logical.distill_with_name(name); - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + let mut node = self.core.distill_with_name(name); + if let Some(ow) = 
watermark_pretty(self.base.watermark_columns(), self.schema()) { node.fields.push(("output_watermarks".into(), ow)); } node @@ -131,13 +147,13 @@ impl_plan_tree_node_for_unary! { StreamGroupTopN } impl PlanTreeNodeUnary for StreamGroupTopN { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical, self.vnode_col_idx) + let mut core = self.core.clone(); + core.input = input; + Self::new(core, self.vnode_col_idx) } } diff --git a/src/frontend/src/optimizer/plan_node/stream_hash_agg.rs b/src/frontend/src/optimizer/plan_node/stream_hash_agg.rs index d4e48a94e417b..55ab6b5906e59 100644 --- a/src/frontend/src/optimizer/plan_node/stream_hash_agg.rs +++ b/src/frontend/src/optimizer/plan_node/stream_hash_agg.rs @@ -18,17 +18,18 @@ use pretty_xmlish::XmlNode; use risingwave_common::error::{ErrorCode, Result}; use risingwave_pb::stream_plan::stream_node::PbNodeBody; -use super::generic::{self, PlanAggCall}; +use super::generic::{self, GenericPlanRef, PlanAggCall}; use super::utils::{childless_record, plan_node_name, watermark_pretty, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::ExprRewriter; +use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::stream_fragmenter::BuildFragmentGraphState; use crate::utils::{ColIndexMapping, ColIndexMappingRewriteExt, IndexSet}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamHashAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, /// An optional column index which is the vnode of each row computed by the input's consistent /// hash distribution. 
@@ -46,38 +47,38 @@ pub struct StreamHashAgg { impl StreamHashAgg { pub fn new( - logical: generic::Agg, + core: generic::Agg, vnode_col_idx: Option, row_count_idx: usize, ) -> Self { - Self::new_with_eowc(logical, vnode_col_idx, row_count_idx, false) + Self::new_with_eowc(core, vnode_col_idx, row_count_idx, false) } pub fn new_with_eowc( - logical: generic::Agg, + core: generic::Agg, vnode_col_idx: Option, row_count_idx: usize, emit_on_window_close: bool, ) -> Self { - assert_eq!(logical.agg_calls[row_count_idx], PlanAggCall::count_star()); + assert_eq!(core.agg_calls[row_count_idx], PlanAggCall::count_star()); - let input = logical.input.clone(); + let input = core.input.clone(); let input_dist = input.distribution(); - let dist = logical + let dist = core .i2o_col_mapping() .rewrite_provided_distribution(input_dist); - let mut watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let mut watermark_columns = FixedBitSet::with_capacity(core.output_len()); let mut window_col_idx = None; - let mapping = logical.i2o_col_mapping(); + let mapping = core.i2o_col_mapping(); if emit_on_window_close { - let wtmk_group_key = logical.watermark_group_key(input.watermark_columns()); + let wtmk_group_key = core.watermark_group_key(input.watermark_columns()); assert!(wtmk_group_key.len() == 1); // checked in `to_eowc_version` window_col_idx = Some(wtmk_group_key[0]); // EOWC HashAgg only produce one watermark column, i.e. the window column watermark_columns.insert(mapping.map(wtmk_group_key[0])); } else { - for idx in logical.group_key.indices() { + for idx in core.group_key.indices() { if input.watermark_columns().contains(idx) { watermark_columns.insert(mapping.map(idx)); } @@ -85,8 +86,8 @@ impl StreamHashAgg { } // Hash agg executor might change the append-only behavior of the stream. 
- let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, emit_on_window_close, // in EOWC mode, we produce append only output emit_on_window_close, @@ -94,7 +95,7 @@ impl StreamHashAgg { ); StreamHashAgg { base, - logical, + core, vnode_col_idx, row_count_idx, emit_on_window_close, @@ -103,22 +104,22 @@ impl StreamHashAgg { } pub fn agg_calls(&self) -> &[PlanAggCall] { - &self.logical.agg_calls + &self.core.agg_calls } pub fn group_key(&self) -> &IndexSet { - &self.logical.group_key + &self.core.group_key } pub(crate) fn i2o_col_mapping(&self) -> ColIndexMapping { - self.logical.i2o_col_mapping() + self.core.i2o_col_mapping() } // TODO(rc): It'll be better to force creation of EOWC version through `new`, especially when we // optimize for 2-phase EOWC aggregation later. pub fn to_eowc_version(&self) -> Result { let input = self.input(); - let wtmk_group_key = self.logical.watermark_group_key(input.watermark_columns()); + let wtmk_group_key = self.core.watermark_group_key(input.watermark_columns()); if wtmk_group_key.is_empty() || wtmk_group_key.len() > 1 { return Err(ErrorCode::NotSupported( @@ -130,7 +131,7 @@ impl StreamHashAgg { } Ok(Self::new_with_eowc( - self.logical.clone(), + self.core.clone(), self.vnode_col_idx, self.row_count_idx, true, @@ -141,8 +142,8 @@ impl StreamHashAgg { impl Distill for StreamHashAgg { fn distill<'a>(&self) -> XmlNode<'a> { - let mut vec = self.logical.fields_pretty(); - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + let mut vec = self.core.fields_pretty(); + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { vec.push(("output_watermarks", ow)); } childless_record( @@ -158,13 +159,13 @@ impl Distill for StreamHashAgg { impl PlanTreeNodeUnary for StreamHashAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { let 
logical = generic::Agg { input, - ..self.logical.clone() + ..self.core.clone() }; Self::new_with_eowc( logical, @@ -179,8 +180,8 @@ impl_plan_tree_node_for_unary! { StreamHashAgg } impl StreamNode for StreamHashAgg { fn to_stream_prost_body(&self, state: &mut BuildFragmentGraphState) -> PbNodeBody { use risingwave_pb::stream_plan::*; - let (result_table, agg_states, distinct_dedup_tables) = - self.logical + let (intermediate_state_table, agg_states, distinct_dedup_tables) = + self.core .infer_tables(&self.base, self.vnode_col_idx, self.window_col_idx); PbNodeBody::HashAgg(HashAggNode { @@ -196,8 +197,8 @@ impl StreamNode for StreamHashAgg { .into_iter() .map(|s| s.into_prost(state)) .collect(), - result_table: Some( - result_table + intermediate_state_table: Some( + intermediate_state_table .with_id(state.gen_table_id_wrapped()) .to_internal_table_prost(), ), @@ -214,7 +215,7 @@ impl StreamNode for StreamHashAgg { }) .collect(), row_count_index: self.row_count_idx as u32, - emit_on_window_close: self.base.emit_on_window_close, + emit_on_window_close: self.base.emit_on_window_close(), }) } } @@ -225,10 +226,10 @@ impl ExprRewritable for StreamHashAgg { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); + let mut core = self.core.clone(); + core.rewrite_exprs(r); Self::new_with_eowc( - logical, + core, self.vnode_col_idx, self.row_count_idx, self.emit_on_window_close, diff --git a/src/frontend/src/optimizer/plan_node/stream_hash_join.rs b/src/frontend/src/optimizer/plan_node/stream_hash_join.rs index ea0925cf33bb7..9d9c41425c4b1 100644 --- a/src/frontend/src/optimizer/plan_node/stream_hash_join.rs +++ b/src/frontend/src/optimizer/plan_node/stream_hash_join.rs @@ -20,6 +20,8 @@ use risingwave_pb::plan_common::JoinType; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::{DeltaExpression, HashJoinNode, PbInequalityPair}; +use 
super::generic::{GenericPlanRef, Join}; +use super::stream::StreamPlanRef; use super::utils::{childless_record, plan_node_name, watermark_pretty, Distill}; use super::{ generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeBinary, StreamDeltaJoin, StreamNode, @@ -37,7 +39,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamHashJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, /// The join condition must be equivalent to `logical.on`, but separated into equal and /// non-equal parts to facilitate execution later @@ -62,18 +64,14 @@ pub struct StreamHashJoin { } impl StreamHashJoin { - pub fn new(logical: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { + pub fn new(core: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { // Inner join won't change the append-only behavior of the stream. The rest might. - let append_only = match logical.join_type { - JoinType::Inner => logical.left.append_only() && logical.right.append_only(), + let append_only = match core.join_type { + JoinType::Inner => core.left.append_only() && core.right.append_only(), _ => false, }; - let dist = Self::derive_dist( - logical.left.distribution(), - logical.right.distribution(), - &logical, - ); + let dist = Self::derive_dist(core.left.distribution(), core.right.distribution(), &core); let mut inequality_pairs = vec![]; let mut clean_left_state_conjunction_idx = None; @@ -82,8 +80,8 @@ impl StreamHashJoin { // Reorder `eq_join_predicate` by placing the watermark column at the beginning. 
let mut reorder_idx = vec![]; for (i, (left_key, right_key)) in eq_join_predicate.eq_indexes().iter().enumerate() { - if logical.left.watermark_columns().contains(*left_key) - && logical.right.watermark_columns().contains(*right_key) + if core.left.watermark_columns().contains(*left_key) + && core.right.watermark_columns().contains(*right_key) { reorder_idx.push(i); } @@ -91,14 +89,14 @@ impl StreamHashJoin { let eq_join_predicate = eq_join_predicate.reorder(&reorder_idx); let watermark_columns = { - let l2i = logical.l2i_col_mapping(); - let r2i = logical.r2i_col_mapping(); + let l2i = core.l2i_col_mapping(); + let r2i = core.r2i_col_mapping(); let mut equal_condition_clean_state = false; - let mut watermark_columns = FixedBitSet::with_capacity(logical.internal_column_num()); + let mut watermark_columns = FixedBitSet::with_capacity(core.internal_column_num()); for (left_key, right_key) in eq_join_predicate.eq_indexes() { - if logical.left.watermark_columns().contains(left_key) - && logical.right.watermark_columns().contains(right_key) + if core.left.watermark_columns().contains(left_key) + && core.right.watermark_columns().contains(right_key) { equal_condition_clean_state = true; if let Some(internal) = l2i.try_map(left_key) { @@ -120,20 +118,14 @@ impl StreamHashJoin { ) in original_inequality_pairs { let both_upstream_has_watermark = if key_required_larger < key_required_smaller { - logical - .left - .watermark_columns() - .contains(key_required_larger) - && logical + core.left.watermark_columns().contains(key_required_larger) + && core .right .watermark_columns() .contains(key_required_smaller - left_cols_num) } else { - logical - .left - .watermark_columns() - .contains(key_required_smaller) - && logical + core.left.watermark_columns().contains(key_required_smaller) + && core .right .watermark_columns() .contains(key_required_larger - left_cols_num) @@ -183,12 +175,12 @@ impl StreamHashJoin { )); } } - 
logical.i2o_col_mapping().rewrite_bitset(&watermark_columns) + core.i2o_col_mapping().rewrite_bitset(&watermark_columns) }; // TODO: derive from input - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, append_only, false, // TODO(rc): derive EOWC property from input @@ -197,7 +189,7 @@ impl StreamHashJoin { Self { base, - logical, + core, eq_join_predicate, inequality_pairs, is_append_only: append_only, @@ -208,7 +200,7 @@ impl StreamHashJoin { /// Get join type pub fn join_type(&self) -> JoinType { - self.logical.join_type + self.core.join_type } /// Get a reference to the batch hash join's eq join predicate. @@ -255,7 +247,7 @@ impl StreamHashJoin { /// Convert this hash join to a delta join plan pub fn into_delta_join(self) -> StreamDeltaJoin { - StreamDeltaJoin::new(self.logical, self.eq_join_predicate) + StreamDeltaJoin::new(self.core, self.eq_join_predicate) } pub fn derive_dist_key_in_join_key(&self) -> Vec { @@ -300,11 +292,11 @@ impl Distill for StreamHashJoin { { "interval", self.clean_left_state_conjunction_idx.is_some() && self.clean_right_state_conjunction_idx.is_some() }, { "append_only", self.is_append_only }, ); - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(6); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&EqJoinPredicateDisplay { @@ -325,12 +317,12 @@ impl Distill for StreamHashJoin { if let Some(i) = self.clean_right_state_conjunction_idx { vec.push(("conditions_to_clean_right_state_table", get_cond(i))); } - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { 
vec.push(("output_watermarks", ow)); } if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } @@ -340,18 +332,18 @@ impl Distill for StreamHashJoin { impl PlanTreeNodeBinary for StreamHashJoin { fn left(&self) -> PlanRef { - self.logical.left.clone() + self.core.left.clone() } fn right(&self) -> PlanRef { - self.logical.right.clone() + self.core.right.clone() } fn clone_with_left_right(&self, left: PlanRef, right: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = left; - logical.right = right; - Self::new(logical, self.eq_join_predicate.clone()) + let mut core = self.core.clone(); + core.left = left; + core.right = right; + Self::new(core, self.eq_join_predicate.clone()) } } @@ -366,15 +358,14 @@ impl StreamNode for StreamHashJoin { let dk_indices_in_jk = self.derive_dist_key_in_join_key(); - use super::stream::HashJoin; let (left_table, left_degree_table, left_deduped_input_pk_indices) = - HashJoin::infer_internal_and_degree_table_catalog( + Join::infer_internal_and_degree_table_catalog( self.left().plan_base(), left_jk_indices, dk_indices_in_jk.clone(), ); let (right_table, right_degree_table, right_deduped_input_pk_indices) = - HashJoin::infer_internal_and_degree_table_catalog( + Join::infer_internal_and_degree_table_catalog( self.right().plan_base(), right_jk_indices, dk_indices_in_jk, @@ -402,7 +393,7 @@ impl StreamNode for StreamHashJoin { let null_safe_prost = self.eq_join_predicate.null_safes().into_iter().collect(); NodeBody::HashJoin(HashJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, left_key: left_jk_indices_prost, right_key: right_jk_indices_prost, null_safe: null_safe_prost, @@ -443,12 +434,7 @@ impl StreamNode for StreamHashJoin { right_degree_table: Some(right_degree_table.to_internal_table_prost()), left_deduped_input_pk_indices, 
right_deduped_input_pk_indices, - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), is_append_only: self.is_append_only, }) } @@ -460,8 +446,8 @@ impl ExprRewritable for StreamHashJoin { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.eq_join_predicate.rewrite_exprs(r)).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.eq_join_predicate.rewrite_exprs(r)).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_hop_window.rs b/src/frontend/src/optimizer/plan_node/stream_hop_window.rs index ec18ee2588770..e177be6942360 100644 --- a/src/frontend/src/optimizer/plan_node/stream_hop_window.rs +++ b/src/frontend/src/optimizer/plan_node/stream_hop_window.rs @@ -17,6 +17,7 @@ use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::HopWindowNode; +use super::generic::GenericPlanRef; use super::stream::StreamPlanRef; use super::utils::{childless_record, watermark_pretty, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; @@ -28,37 +29,37 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamHopWindow { pub base: PlanBase, - logical: generic::HopWindow, + core: generic::HopWindow, window_start_exprs: Vec, window_end_exprs: Vec, } impl StreamHopWindow { pub fn new( - logical: generic::HopWindow, + core: generic::HopWindow, window_start_exprs: Vec, window_end_exprs: Vec, ) -> Self { - let input = logical.input.clone(); - let i2o = logical.i2o_col_mapping(); + let input = core.input.clone(); + let i2o = core.i2o_col_mapping(); let dist = i2o.rewrite_provided_distribution(input.distribution()); 
let mut watermark_columns = input.watermark_columns().clone(); - watermark_columns.grow(logical.internal_column_num()); + watermark_columns.grow(core.internal_column_num()); - if watermark_columns.contains(logical.time_col.index) { + if watermark_columns.contains(core.time_col.index) { // Watermark on `time_col` indicates watermark on both `window_start` and `window_end`. - watermark_columns.insert(logical.internal_window_start_col_idx()); - watermark_columns.insert(logical.internal_window_end_col_idx()); + watermark_columns.insert(core.internal_window_start_col_idx()); + watermark_columns.insert(core.internal_window_end_col_idx()); } let watermark_columns = ColIndexMapping::with_remaining_columns( - &logical.output_indices, - logical.internal_column_num(), + &core.output_indices, + core.internal_column_num(), ) .rewrite_bitset(&watermark_columns); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, input.append_only(), input.emit_on_window_close(), @@ -66,7 +67,7 @@ impl StreamHopWindow { ); Self { base, - logical, + core, window_start_exprs, window_end_exprs, } @@ -75,8 +76,8 @@ impl StreamHopWindow { impl Distill for StreamHopWindow { fn distill<'a>(&self) -> XmlNode<'a> { - let mut vec = self.logical.fields_pretty(); - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + let mut vec = self.core.fields_pretty(); + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { vec.push(("output_watermarks", ow)); } childless_record("StreamHopWindow", vec) @@ -85,14 +86,14 @@ impl Distill for StreamHopWindow { impl PlanTreeNodeUnary for StreamHopWindow { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; + let mut core = self.core.clone(); + core.input = input; Self::new( - logical, + core, 
self.window_start_exprs.clone(), self.window_end_exprs.clone(), ) @@ -104,15 +105,10 @@ impl_plan_tree_node_for_unary! {StreamHopWindow} impl StreamNode for StreamHopWindow { fn to_stream_prost_body(&self, _state: &mut BuildFragmentGraphState) -> PbNodeBody { PbNodeBody::HopWindow(HopWindowNode { - time_col: self.logical.time_col.index() as _, - window_slide: Some(self.logical.window_slide.into()), - window_size: Some(self.logical.window_size.into()), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), + time_col: self.core.time_col.index() as _, + window_slide: Some(self.core.window_slide.into()), + window_size: Some(self.core.window_size.into()), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), window_start_exprs: self .window_start_exprs .clone() @@ -136,7 +132,7 @@ impl ExprRewritable for StreamHopWindow { fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { Self::new( - self.logical.clone(), + self.core.clone(), self.window_start_exprs .clone() .into_iter() diff --git a/src/frontend/src/optimizer/plan_node/stream_materialize.rs b/src/frontend/src/optimizer/plan_node/stream_materialize.rs index 66be991bae95c..9c87f1a34abbd 100644 --- a/src/frontend/src/optimizer/plan_node/stream_materialize.rs +++ b/src/frontend/src/optimizer/plan_node/stream_materialize.rs @@ -24,11 +24,13 @@ use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use super::derive::derive_columns; +use super::stream::StreamPlanRef; use super::utils::{childless_record, Distill}; use super::{reorganize_elements_id, ExprRewritable, PlanRef, PlanTreeNodeUnary, StreamNode}; -use crate::catalog::table_catalog::{TableCatalog, TableType, TableVersion}; +use crate::catalog::table_catalog::{CreateType, TableCatalog, TableType, TableVersion}; use crate::catalog::FragmentId; use crate::optimizer::plan_node::derive::derive_pk; +use 
crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{PlanBase, PlanNodeMeta}; use crate::optimizer::property::{Cardinality, Distribution, Order, RequiredDist}; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -45,7 +47,16 @@ pub struct StreamMaterialize { impl StreamMaterialize { #[must_use] pub fn new(input: PlanRef, table: TableCatalog) -> Self { - let base = PlanBase::derive_stream_plan_base(&input); + let base = PlanBase::new_stream( + input.ctx(), + input.schema().clone(), + Some(table.stream_key.clone()), + input.functional_dependency().clone(), + input.distribution().clone(), + input.append_only(), + input.emit_on_window_close(), + input.watermark_columns().clone(), + ); Self { base, input, table } } @@ -140,7 +151,22 @@ impl StreamMaterialize { TableType::MaterializedView => { assert_matches!(user_distributed_by, RequiredDist::Any); // ensure the same pk will not shuffle to different node - RequiredDist::shard_by_key(input.schema().len(), input.logical_pk()) + let required_dist = + RequiredDist::shard_by_key(input.schema().len(), input.expect_stream_key()); + + // If the input is a stream join, enforce the stream key as the materialized + // view distribution key to avoid slow backfilling caused by + // data skew of the dimension table join key. + // See for more information. 
+ let is_stream_join = matches!(input.as_stream_hash_join(), Some(_join)) + || matches!(input.as_stream_temporal_join(), Some(_join)) + || matches!(input.as_stream_delta_join(), Some(_join)); + + if is_stream_join { + return Ok(required_dist.enforce(input, &Order::any())); + } + + required_dist } TableType::Index => { assert_matches!( @@ -182,24 +208,25 @@ impl StreamMaterialize { let append_only = input.append_only(); let watermark_columns = input.watermark_columns().clone(); - let (pk, stream_key) = if let Some(pk_column_indices) = pk_column_indices { - let pk = pk_column_indices + let (table_pk, stream_key) = if let Some(pk_column_indices) = pk_column_indices { + let table_pk = pk_column_indices .iter() .map(|idx| ColumnOrder::new(*idx, OrderType::ascending())) .collect(); - // No order by for create table, so stream key is identical to pk. - (pk, pk_column_indices) + // No order by for create table, so stream key is identical to table pk. + (table_pk, pk_column_indices) } else { derive_pk(input, user_order_by, &columns) }; + // assert: `stream_key` is a subset of `table_pk` - let read_prefix_len_hint = stream_key.len(); + let read_prefix_len_hint = table_pk.len(); Ok(TableCatalog { id: TableId::placeholder(), associated_source_id: None, name, columns, - pk, + pk: table_pk, stream_key, distribution_key, table_type, @@ -222,6 +249,7 @@ impl StreamMaterialize { created_at_epoch: None, initialized_at_epoch: None, cleaned_by_watermark: false, + create_type: CreateType::Foreground, // Will be updated in the handler itself. }) } @@ -262,8 +290,8 @@ impl Distill for StreamMaterialize { vec.push(("pk_conflict", Pretty::from(pk_conflict_behavior))); - let watermark_columns = &self.base.watermark_columns; - if self.base.watermark_columns.count_ones(..) > 0 { + let watermark_columns = &self.base.watermark_columns(); + if self.base.watermark_columns().count_ones(..) 
> 0 { let watermark_column_names = watermark_columns .ones() .map(|i| table.columns()[i].name_with_hidden().to_string()) @@ -283,16 +311,16 @@ impl PlanTreeNodeUnary for StreamMaterialize { fn clone_with_input(&self, input: PlanRef) -> Self { let new = Self::new(input, self.table().clone()); new.base - .schema + .schema() .fields .iter() - .zip_eq_fast(self.base.schema.fields.iter()) + .zip_eq_fast(self.base.schema().fields.iter()) .for_each(|(a, b)| { assert_eq!(a.data_type, b.data_type); assert_eq!(a.type_name, b.type_name); assert_eq!(a.sub_fields, b.sub_fields); }); - assert_eq!(new.plan_base().logical_pk, self.plan_base().logical_pk); + assert_eq!(new.plan_base().stream_key(), self.plan_base().stream_key()); new } } diff --git a/src/frontend/src/optimizer/plan_node/stream_now.rs b/src/frontend/src/optimizer/plan_node/stream_now.rs index da1f04f2a2698..91ebc344fa51d 100644 --- a/src/frontend/src/optimizer/plan_node/stream_now.rs +++ b/src/frontend/src/optimizer/plan_node/stream_now.rs @@ -19,8 +19,7 @@ use risingwave_common::types::DataType; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::NowNode; -use super::generic::GenericPlanRef; -use super::stream::StreamPlanRef; +use super::generic::{GenericPlanRef, PhysicalPlanRef}; use super::utils::{childless_record, Distill, TableCatalogBuilder}; use super::{ExprRewritable, LogicalNow, PlanBase, StreamNode}; use crate::optimizer::plan_node::utils::column_names_pretty; @@ -46,7 +45,7 @@ impl StreamNow { let base = PlanBase::new_stream( ctx, schema, - vec![], + Some(vec![]), FunctionalDependencySet::default(), Distribution::Single, false, @@ -59,7 +58,7 @@ impl StreamNow { impl Distill for StreamNow { fn distill<'a>(&self) -> XmlNode<'a> { - let vec = if self.base.ctx.is_explain_verbose() { + let vec = if self.base.ctx().is_explain_verbose() { vec![("output", column_names_pretty(self.schema()))] } else { vec![] diff --git a/src/frontend/src/optimizer/plan_node/stream_over_window.rs 
b/src/frontend/src/optimizer/plan_node/stream_over_window.rs index b07c75a1f261e..5a2f9d98f1340 100644 --- a/src/frontend/src/optimizer/plan_node/stream_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/stream_over_window.rs @@ -21,75 +21,76 @@ use risingwave_pb::stream_plan::stream_node::PbNodeBody; use super::generic::{GenericPlanNode, PlanWindowFunction}; use super::utils::{impl_distill_by_unit, TableCatalogBuilder}; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::stream_fragmenter::BuildFragmentGraphState; use crate::TableCatalog; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamOverWindow { pub base: PlanBase, - logical: generic::OverWindow, + core: generic::OverWindow, } impl StreamOverWindow { - pub fn new(logical: generic::OverWindow) -> Self { - assert!(logical.funcs_have_same_partition_and_order()); + pub fn new(core: generic::OverWindow) -> Self { + assert!(core.funcs_have_same_partition_and_order()); - let input = &logical.input; - let watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let input = &core.input; + let watermark_columns = FixedBitSet::with_capacity(core.output_len()); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, input.distribution().clone(), false, // general over window cannot be append-only false, watermark_columns, ); - StreamOverWindow { base, logical } + StreamOverWindow { base, core } } fn infer_state_table(&self) -> TableCatalog { let mut tbl_builder = TableCatalogBuilder::new(self.ctx().with_options().internal_table_subset()); - let out_schema = self.logical.schema(); + let out_schema = self.core.schema(); for field in out_schema.fields() { tbl_builder.add_column(field); } let mut order_cols = HashSet::new(); - for idx in self.logical.partition_key_indices() { + for idx in self.core.partition_key_indices() { if 
order_cols.insert(idx) { tbl_builder.add_order_column(idx, OrderType::ascending()); } } let read_prefix_len_hint = tbl_builder.get_current_pk_len(); - for o in self.logical.order_key() { + for o in self.core.order_key() { if order_cols.insert(o.column_index) { tbl_builder.add_order_column(o.column_index, o.order_type); } } - for &idx in self.logical.input.logical_pk() { + for &idx in self.core.input.expect_stream_key() { if order_cols.insert(idx) { tbl_builder.add_order_column(idx, OrderType::ascending()); } } - let in_dist_key = self.logical.input.distribution().dist_column_indices(); + let in_dist_key = self.core.input.distribution().dist_column_indices(); tbl_builder.build(in_dist_key.to_vec(), read_prefix_len_hint) } } -impl_distill_by_unit!(StreamOverWindow, logical, "StreamOverWindow"); +impl_distill_by_unit!(StreamOverWindow, core, "StreamOverWindow"); impl PlanTreeNodeUnary for StreamOverWindow { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! 
{ StreamOverWindow } @@ -99,19 +100,19 @@ impl StreamNode for StreamOverWindow { use risingwave_pb::stream_plan::*; let calls = self - .logical + .core .window_functions() .iter() .map(PlanWindowFunction::to_protobuf) .collect(); let partition_by = self - .logical + .core .partition_key_indices() .into_iter() .map(|idx| idx as _) .collect(); let order_by = self - .logical + .core .order_key() .iter() .map(ColumnOrder::to_protobuf) @@ -122,7 +123,7 @@ impl StreamNode for StreamOverWindow { .to_internal_table_prost(); let cache_policy = self .base - .ctx + .ctx() .session_ctx() .config() .get_streaming_over_window_cache_policy(); diff --git a/src/frontend/src/optimizer/plan_node/stream_project.rs b/src/frontend/src/optimizer/plan_node/stream_project.rs index 3a159a957af47..c0ff0d1cf2f43 100644 --- a/src/frontend/src/optimizer/plan_node/stream_project.rs +++ b/src/frontend/src/optimizer/plan_node/stream_project.rs @@ -17,6 +17,7 @@ use pretty_xmlish::XmlNode; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::ProjectNode; +use super::generic::GenericPlanRef; use super::stream::StreamPlanRef; use super::utils::{childless_record, watermark_pretty, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; @@ -29,7 +30,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamProject { pub base: PlanBase, - logical: generic::Project, + core: generic::Project, /// All the watermark derivations, (input_column_index, output_column_index). And the /// derivation expression is the project's expression itself. 
watermark_derivations: Vec<(usize, usize)>, @@ -40,9 +41,9 @@ pub struct StreamProject { impl Distill for StreamProject { fn distill<'a>(&self) -> XmlNode<'a> { let schema = self.schema(); - let mut vec = self.logical.fields_pretty(schema); + let mut vec = self.core.fields_pretty(schema); if let Some(display_output_watermarks) = - watermark_pretty(&self.base.watermark_columns, schema) + watermark_pretty(self.base.watermark_columns(), schema) { vec.push(("output_watermarks", display_output_watermarks)); } @@ -51,16 +52,16 @@ impl Distill for StreamProject { } impl StreamProject { - pub fn new(logical: generic::Project) -> Self { - let input = logical.input.clone(); - let distribution = logical + pub fn new(core: generic::Project) -> Self { + let input = core.input.clone(); + let distribution = core .i2o_col_mapping() .rewrite_provided_distribution(input.distribution()); let mut watermark_derivations = vec![]; let mut nondecreasing_exprs = vec![]; - let mut watermark_columns = FixedBitSet::with_capacity(logical.exprs.len()); - for (expr_idx, expr) in logical.exprs.iter().enumerate() { + let mut watermark_columns = FixedBitSet::with_capacity(core.exprs.len()); + for (expr_idx, expr) in core.exprs.iter().enumerate() { match try_derive_watermark(expr) { WatermarkDerivation::Watermark(input_idx) => { if input.watermark_columns().contains(input_idx) { @@ -80,8 +81,8 @@ impl StreamProject { } // Project executor won't change the append-only behavior of the stream, so it depends on // input's `append_only`. 
- let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, distribution, input.append_only(), input.emit_on_window_close(), @@ -89,30 +90,30 @@ impl StreamProject { ); StreamProject { base, - logical, + core, watermark_derivations, nondecreasing_exprs, } } pub fn as_logical(&self) -> &generic::Project { - &self.logical + &self.core } pub fn exprs(&self) -> &Vec { - &self.logical.exprs + &self.core.exprs } } impl PlanTreeNodeUnary for StreamProject { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! {StreamProject} @@ -125,12 +126,7 @@ impl StreamNode for StreamProject { .map(|(i, o)| (*i as u32, *o as u32)) .unzip(); PbNodeBody::Project(ProjectNode { - select_list: self - .logical - .exprs - .iter() - .map(|x| x.to_expr_proto()) - .collect(), + select_list: self.core.exprs.iter().map(|x| x.to_expr_proto()).collect(), watermark_input_cols, watermark_output_cols, nondecreasing_exprs: self.nondecreasing_exprs.iter().map(|i| *i as _).collect(), @@ -144,8 +140,8 @@ impl ExprRewritable for StreamProject { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_project_set.rs b/src/frontend/src/optimizer/plan_node/stream_project_set.rs index 619fec1f80d15..ba09d79c96c60 100644 --- a/src/frontend/src/optimizer/plan_node/stream_project_set.rs +++ b/src/frontend/src/optimizer/plan_node/stream_project_set.rs @@ -17,7 +17,6 @@ use itertools::Itertools; use 
risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::ProjectSetNode; -use super::stream::StreamPlanRef; use super::utils::impl_distill_by_unit; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::{try_derive_watermark, ExprRewriter, WatermarkDerivation}; @@ -27,7 +26,7 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamProjectSet { pub base: PlanBase, - logical: generic::ProjectSet, + core: generic::ProjectSet, /// All the watermark derivations, (input_column_idx, expr_idx). And the /// derivation expression is the project_set's expression itself. watermark_derivations: Vec<(usize, usize)>, @@ -37,16 +36,16 @@ pub struct StreamProjectSet { } impl StreamProjectSet { - pub fn new(logical: generic::ProjectSet) -> Self { - let input = logical.input.clone(); - let distribution = logical + pub fn new(core: generic::ProjectSet) -> Self { + let input = core.input.clone(); + let distribution = core .i2o_col_mapping() .rewrite_provided_distribution(input.distribution()); let mut watermark_derivations = vec![]; let mut nondecreasing_exprs = vec![]; - let mut watermark_columns = FixedBitSet::with_capacity(logical.output_len()); - for (expr_idx, expr) in logical.select_list.iter().enumerate() { + let mut watermark_columns = FixedBitSet::with_capacity(core.output_len()); + for (expr_idx, expr) in core.select_list.iter().enumerate() { match try_derive_watermark(expr) { WatermarkDerivation::Watermark(input_idx) => { if input.watermark_columns().contains(input_idx) { @@ -67,8 +66,8 @@ impl StreamProjectSet { // ProjectSet executor won't change the append-only behavior of the stream, so it depends on // input's `append_only`. 
- let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, distribution, input.append_only(), input.emit_on_window_close(), @@ -76,24 +75,24 @@ impl StreamProjectSet { ); StreamProjectSet { base, - logical, + core, watermark_derivations, nondecreasing_exprs, } } } -impl_distill_by_unit!(StreamProjectSet, logical, "StreamProjectSet"); +impl_distill_by_unit!(StreamProjectSet, core, "StreamProjectSet"); impl_plan_tree_node_for_unary! { StreamProjectSet } impl PlanTreeNodeUnary for StreamProjectSet { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -106,7 +105,7 @@ impl StreamNode for StreamProjectSet { .unzip(); PbNodeBody::ProjectSet(ProjectSetNode { select_list: self - .logical + .core .select_list .iter() .map(|select_item| select_item.to_project_set_select_item_proto()) @@ -124,8 +123,8 @@ impl ExprRewritable for StreamProjectSet { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_row_id_gen.rs b/src/frontend/src/optimizer/plan_node/stream_row_id_gen.rs index 1562306825dfd..083cb877cd4d6 100644 --- a/src/frontend/src/optimizer/plan_node/stream_row_id_gen.rs +++ b/src/frontend/src/optimizer/plan_node/stream_row_id_gen.rs @@ -17,7 +17,6 @@ use risingwave_pb::stream_plan::stream_node::PbNodeBody; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; -use crate::optimizer::plan_node::stream::StreamPlanRef; use 
crate::optimizer::property::Distribution; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -40,7 +39,7 @@ impl StreamRowIdGen { let base = PlanBase::new_stream( input.ctx(), input.schema().clone(), - input.logical_pk().to_vec(), + input.stream_key().map(|v| v.to_vec()), input.functional_dependency().clone(), distribution, input.append_only(), diff --git a/src/frontend/src/optimizer/plan_node/stream_share.rs b/src/frontend/src/optimizer/plan_node/stream_share.rs index 93e8030d49566..3acf0b132805e 100644 --- a/src/frontend/src/optimizer/plan_node/stream_share.rs +++ b/src/frontend/src/optimizer/plan_node/stream_share.rs @@ -16,31 +16,34 @@ use pretty_xmlish::XmlNode; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::PbStreamNode; +use super::generic::GenericPlanRef; +use super::stream::StreamPlanRef; use super::utils::Distill; use super::{generic, ExprRewritable, PlanRef, PlanTreeNodeUnary, StreamExchange, StreamNode}; use crate::optimizer::plan_node::{LogicalShare, PlanBase, PlanTreeNode}; use crate::stream_fragmenter::BuildFragmentGraphState; +use crate::Explain; /// `StreamShare` will be translated into an `ExchangeNode` based on its distribution finally. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamShare { pub base: PlanBase, - logical: generic::Share, + core: generic::Share, } impl StreamShare { - pub fn new(logical: generic::Share) -> Self { - let input = logical.input.borrow().0.clone(); + pub fn new(core: generic::Share) -> Self { + let input = core.input.borrow().0.clone(); let dist = input.distribution().clone(); // Filter executor won't change the append-only behavior of the stream. 
- let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, input.append_only(), input.emit_on_window_close(), input.watermark_columns().clone(), ); - StreamShare { base, logical } + StreamShare { base, core } } } @@ -52,19 +55,19 @@ impl Distill for StreamShare { impl PlanTreeNodeUnary for StreamShare { fn input(&self) -> PlanRef { - self.logical.input.borrow().clone() + self.core.input.borrow().clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let logical = self.logical.clone(); - logical.replace_input(input); - Self::new(logical) + let core = self.core.clone(); + core.replace_input(input); + Self::new(core) } } impl StreamShare { pub fn replace_input(&self, plan: PlanRef) { - self.logical.replace_input(plan); + self.core.replace_input(plan); } } @@ -78,7 +81,7 @@ impl StreamNode for StreamShare { impl StreamShare { pub fn adhoc_to_stream_prost(&self, state: &mut BuildFragmentGraphState) -> PbStreamNode { - let operator_id = self.base.id.0 as u32; + let operator_id = self.base.id().0 as u32; match state.get_share_stream_node(operator_id) { None => { @@ -96,7 +99,13 @@ impl StreamShare { identity: self.distill_to_string(), node_body: Some(node_body), operator_id: self.id().0 as _, - stream_key: self.logical_pk().iter().map(|x| *x as u32).collect(), + stream_key: self + .stream_key() + .unwrap_or_else(|| panic!("should always have a stream key in the stream plan but not, sub plan: {}", + PlanRef::from(self.clone()).explain_to_string())) + .iter() + .map(|x| *x as u32) + .collect(), fields: self.schema().to_prost(), append_only: self.append_only(), }; diff --git a/src/frontend/src/optimizer/plan_node/stream_simple_agg.rs b/src/frontend/src/optimizer/plan_node/stream_simple_agg.rs index ce3de79153b31..92d96fdf21b08 100644 --- a/src/frontend/src/optimizer/plan_node/stream_simple_agg.rs +++ b/src/frontend/src/optimizer/plan_node/stream_simple_agg.rs @@ -21,23 +21,25 @@ use super::generic::{self, 
PlanAggCall}; use super::utils::{childless_record, plan_node_name, Distill}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::ExprRewriter; +use crate::optimizer::plan_node::generic::PhysicalPlanRef; +use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::property::Distribution; use crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamSimpleAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, /// The index of `count(*)` in `agg_calls`. row_count_idx: usize, } impl StreamSimpleAgg { - pub fn new(logical: generic::Agg, row_count_idx: usize) -> Self { - assert_eq!(logical.agg_calls[row_count_idx], PlanAggCall::count_star()); + pub fn new(core: generic::Agg, row_count_idx: usize) -> Self { + assert_eq!(core.agg_calls[row_count_idx], PlanAggCall::count_star()); - let input = logical.input.clone(); + let input = core.input.clone(); let input_dist = input.distribution(); let dist = match input_dist { Distribution::Single => Distribution::Single, @@ -45,20 +47,19 @@ impl StreamSimpleAgg { }; // Empty because watermark column(s) must be in group key and simple agg have no group key. - let watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let watermark_columns = FixedBitSet::with_capacity(core.output_len()); // Simple agg executor might change the append-only behavior of the stream. 
- let base = - PlanBase::new_stream_with_logical(&logical, dist, false, false, watermark_columns); + let base = PlanBase::new_stream_with_core(&core, dist, false, false, watermark_columns); StreamSimpleAgg { base, - logical, + core, row_count_idx, } } pub fn agg_calls(&self) -> &[PlanAggCall] { - &self.logical.agg_calls + &self.core.agg_calls } } @@ -67,19 +68,19 @@ impl Distill for StreamSimpleAgg { let name = plan_node_name!("StreamSimpleAgg", { "append_only", self.input().append_only() }, ); - childless_record(name, self.logical.fields_pretty()) + childless_record(name, self.core.fields_pretty()) } } impl PlanTreeNodeUnary for StreamSimpleAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { let logical = generic::Agg { input, - ..self.logical.clone() + ..self.core.clone() }; Self::new(logical, self.row_count_idx) } @@ -89,8 +90,8 @@ impl_plan_tree_node_for_unary! { StreamSimpleAgg } impl StreamNode for StreamSimpleAgg { fn to_stream_prost_body(&self, state: &mut BuildFragmentGraphState) -> PbNodeBody { use risingwave_pb::stream_plan::*; - let (result_table, agg_states, distinct_dedup_tables) = - self.logical.infer_tables(&self.base, None, None); + let (intermediate_state_table, agg_states, distinct_dedup_tables) = + self.core.infer_tables(&self.base, None, None); PbNodeBody::SimpleAgg(SimpleAggNode { agg_calls: self @@ -100,7 +101,7 @@ impl StreamNode for StreamSimpleAgg { .collect(), distribution_key: self .base - .dist + .distribution() .dist_column_indices() .iter() .map(|idx| *idx as u32) @@ -110,8 +111,8 @@ impl StreamNode for StreamSimpleAgg { .into_iter() .map(|s| s.into_prost(state)) .collect(), - result_table: Some( - result_table + intermediate_state_table: Some( + intermediate_state_table .with_id(state.gen_table_id_wrapped()) .to_internal_table_prost(), ), @@ -138,8 +139,8 @@ impl ExprRewritable for StreamSimpleAgg { } fn rewrite_exprs(&self, r: &mut dyn 
ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.row_count_idx).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.row_count_idx).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_sink.rs b/src/frontend/src/optimizer/plan_node/stream_sink.rs index 60e89abf3f5c9..32e9fb487910c 100644 --- a/src/frontend/src/optimizer/plan_node/stream_sink.rs +++ b/src/frontend/src/optimizer/plan_node/stream_sink.rs @@ -15,6 +15,7 @@ use std::assert_matches::assert_matches; use std::io::{Error, ErrorKind}; +use anyhow::anyhow; use fixedbitset::FixedBitSet; use itertools::Itertools; use pretty_xmlish::{Pretty, XmlNode}; @@ -23,17 +24,20 @@ use risingwave_common::constants::log_store::{ EPOCH_COLUMN_INDEX, KV_LOG_STORE_PREDEFINED_COLUMNS, SEQ_ID_COLUMN_INDEX, }; use risingwave_common::error::{ErrorCode, Result}; +use risingwave_common::session_config::sink_decouple::SinkDecouple; use risingwave_common::util::sort_util::OrderType; +use risingwave_connector::match_sink_name_str; use risingwave_connector::sink::catalog::desc::SinkDesc; -use risingwave_connector::sink::catalog::{SinkId, SinkType}; +use risingwave_connector::sink::catalog::{SinkFormat, SinkFormatDesc, SinkId, SinkType}; use risingwave_connector::sink::{ - SINK_TYPE_APPEND_ONLY, SINK_TYPE_DEBEZIUM, SINK_TYPE_OPTION, SINK_TYPE_UPSERT, - SINK_USER_FORCE_APPEND_ONLY_OPTION, + SinkError, CONNECTOR_TYPE_KEY, SINK_TYPE_APPEND_ONLY, SINK_TYPE_DEBEZIUM, SINK_TYPE_OPTION, + SINK_TYPE_UPSERT, SINK_USER_FORCE_APPEND_ONLY_OPTION, }; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use tracing::info; use super::derive::{derive_columns, derive_pk}; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill, IndicesDisplay, TableCatalogBuilder}; use super::{ExprRewritable, PlanBase, PlanRef, StreamNode}; use crate::optimizer::plan_node::PlanTreeNodeUnary; @@ -54,7 +58,7 @@ pub 
struct StreamSink { impl StreamSink { #[must_use] pub fn new(input: PlanRef, sink_desc: SinkDesc) -> Self { - let base = PlanBase::derive_stream_plan_base(&input); + let base = input.plan_base().clone_with_new_plan_id(); Self { base, input, @@ -78,6 +82,7 @@ impl StreamSink { out_names: Vec, definition: String, properties: WithOptions, + format_desc: Option, ) -> Result { let columns = derive_columns(input.schema(), out_names, &user_cols)?; let (input, sink) = Self::derive_sink_desc( @@ -90,8 +95,27 @@ impl StreamSink { columns, definition, properties, + format_desc, )?; + // check and ensure that the sink connector is specified and supported + match sink.properties.get(CONNECTOR_TYPE_KEY) { + Some(connector) => match_sink_name_str!( + connector.to_lowercase().as_str(), + SinkType, + Ok(()), + |other| Err(SinkError::Config(anyhow!( + "unsupported sink type {}", + other + ))) + )?, + None => { + return Err( + SinkError::Config(anyhow!("connector not specified when create sink")).into(), + ); + } + } + Ok(Self::new(input, sink)) } @@ -105,8 +129,10 @@ impl StreamSink { columns: Vec, definition: String, properties: WithOptions, + format_desc: Option, ) -> Result<(PlanRef, SinkDesc)> { - let sink_type = Self::derive_sink_type(input.append_only(), &properties)?; + let sink_type = + Self::derive_sink_type(input.append_only(), &properties, format_desc.as_ref())?; let (pk, _) = derive_pk(input.clone(), user_order_by, &columns); let downstream_pk = Self::parse_downstream_pk(&columns, properties.get(DOWNSTREAM_PK_KEY))?; @@ -138,7 +164,7 @@ impl StreamSink { } _ => { assert_matches!(user_distributed_by, RequiredDist::Any); - RequiredDist::shard_by_key(input.schema().len(), input.logical_pk()) + RequiredDist::shard_by_key(input.schema().len(), input.expect_stream_key()) } } } @@ -157,11 +183,12 @@ impl StreamSink { distribution_key, properties: properties.into_inner(), sink_type, + format_desc, }; Ok((input, sink_desc)) } - fn derive_sink_type(input_append_only: bool, 
properties: &WithOptions) -> Result { + fn is_user_defined_append_only(properties: &WithOptions) -> Result { if let Some(sink_type) = properties.get(SINK_TYPE_OPTION) { if sink_type != SINK_TYPE_APPEND_ONLY && sink_type != SINK_TYPE_DEBEZIUM @@ -180,7 +207,10 @@ impl StreamSink { .into()); } } + Ok(properties.value_eq_ignore_case(SINK_TYPE_OPTION, SINK_TYPE_APPEND_ONLY)) + } + fn is_user_force_append_only(properties: &WithOptions) -> Result { if properties.contains_key(SINK_USER_FORCE_APPEND_ONLY_OPTION) && !properties.value_eq_ignore_case(SINK_USER_FORCE_APPEND_ONLY_OPTION, "true") && !properties.value_eq_ignore_case(SINK_USER_FORCE_APPEND_ONLY_OPTION, "false") @@ -194,12 +224,25 @@ impl StreamSink { ))) .into()); } + Ok(properties.value_eq_ignore_case(SINK_USER_FORCE_APPEND_ONLY_OPTION, "true")) + } + fn derive_sink_type( + input_append_only: bool, + properties: &WithOptions, + format_desc: Option<&SinkFormatDesc>, + ) -> Result { let frontend_derived_append_only = input_append_only; - let user_defined_append_only = - properties.value_eq_ignore_case(SINK_TYPE_OPTION, SINK_TYPE_APPEND_ONLY); - let user_force_append_only = - properties.value_eq_ignore_case(SINK_USER_FORCE_APPEND_ONLY_OPTION, "true"); + let (user_defined_append_only, user_force_append_only) = match format_desc { + Some(f) => ( + f.format == SinkFormat::AppendOnly, + Self::is_user_force_append_only(&WithOptions::from_inner(f.options.clone()))?, + ), + None => ( + Self::is_user_defined_append_only(properties)?, + Self::is_user_force_append_only(properties)?, + ), + }; match ( frontend_derived_append_only, @@ -212,14 +255,14 @@ impl StreamSink { (false, true, false) => { Err(ErrorCode::SinkError(Box::new(Error::new( ErrorKind::InvalidInput, - "The sink cannot be append-only. Please add \"force_append_only='true'\" in WITH options to force the sink to be append-only. Notice that this will cause the sink executor to drop any UPDATE or DELETE message.", + "The sink cannot be append-only. 
Please add \"force_append_only='true'\" in options to force the sink to be append-only. Notice that this will cause the sink executor to drop any UPDATE or DELETE message.", ))) .into()) } (_, false, true) => { Err(ErrorCode::SinkError(Box::new(Error::new( ErrorKind::InvalidInput, - "Cannot force the sink to be append-only without \"type='append-only'\"in WITH options.", + "Cannot force the sink to be append-only without \"FORMAT PLAIN\" or \"type='append-only'\".", ))) .into()) } @@ -347,7 +390,7 @@ impl Distill for StreamSink { .iter() .map(|k| k.column_index) .collect_vec(), - schema: &self.base.schema, + schema: self.base.schema(), }; vec.push(("pk", pk.distill())); } @@ -367,10 +410,27 @@ impl StreamNode for StreamSink { PbNodeBody::Sink(SinkNode { sink_desc: Some(self.sink_desc.to_proto()), table: Some(table.to_internal_table_prost()), - log_store_type: if self.base.ctx.session_ctx().config().get_sink_decouple() { - SinkLogStoreType::KvLogStore as i32 - } else { - SinkLogStoreType::InMemoryLogStore as i32 + log_store_type: match self.base.ctx().session_ctx().config().get_sink_decouple() { + SinkDecouple::Default => { + let enable_sink_decouple = + match_sink_name_str!( + self.sink_desc.properties.get(CONNECTOR_TYPE_KEY).expect( + "have checked connector is contained when create the `StreamSink`" + ).to_lowercase().as_str(), + SinkTypeName, + SinkTypeName::default_sink_decouple(&self.sink_desc), + |_unsupported| unreachable!( + "have checked connector is supported when create the `StreamSink`" + ) + ); + if enable_sink_decouple { + SinkLogStoreType::KvLogStore as i32 + } else { + SinkLogStoreType::InMemoryLogStore as i32 + } + } + SinkDecouple::Enable => SinkLogStoreType::KvLogStore as i32, + SinkDecouple::Disable => SinkLogStoreType::InMemoryLogStore as i32, }, }) } diff --git a/src/frontend/src/optimizer/plan_node/stream_sort.rs b/src/frontend/src/optimizer/plan_node/stream_sort.rs index ea130a18af721..41a56a0fd5df2 100644 --- 
a/src/frontend/src/optimizer/plan_node/stream_sort.rs +++ b/src/frontend/src/optimizer/plan_node/stream_sort.rs @@ -20,6 +20,8 @@ use risingwave_common::catalog::FieldDisplay; use risingwave_common::util::sort_util::OrderType; use risingwave_pb::stream_plan::stream_node::PbNodeBody; +use super::generic::{GenericPlanRef, PhysicalPlanRef}; +use super::stream::StreamPlanRef; use super::utils::{childless_record, Distill, TableCatalogBuilder}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -48,7 +50,7 @@ impl StreamEowcSort { assert!(input.watermark_columns().contains(sort_column_index)); let schema = input.schema().clone(); - let logical_pk = input.logical_pk().to_vec(); + let stream_key = input.stream_key().map(|v| v.to_vec()); let fd_set = input.functional_dependency().clone(); let dist = input.distribution().clone(); let mut watermark_columns = FixedBitSet::with_capacity(input.schema().len()); @@ -56,7 +58,7 @@ impl StreamEowcSort { let base = PlanBase::new_stream( input.ctx(), schema, - logical_pk, + stream_key, fd_set, dist, true, @@ -84,7 +86,7 @@ impl StreamEowcSort { tbl_builder.add_order_column(self.sort_column_index, OrderType::ascending()); order_cols.insert(self.sort_column_index); - let dist_key = self.base.dist.dist_column_indices().to_vec(); + let dist_key = self.base.distribution().dist_column_indices().to_vec(); for idx in &dist_key { if !order_cols.contains(idx) { tbl_builder.add_order_column(*idx, OrderType::ascending()); @@ -92,7 +94,7 @@ impl StreamEowcSort { } } - for idx in self.input.logical_pk() { + for idx in self.input.expect_stream_key() { if !order_cols.contains(idx) { tbl_builder.add_order_column(*idx, OrderType::ascending()); order_cols.insert(*idx); diff --git a/src/frontend/src/optimizer/plan_node/stream_source.rs b/src/frontend/src/optimizer/plan_node/stream_source.rs index 3172c4c06f80c..ae66cf568118b 100644 --- 
a/src/frontend/src/optimizer/plan_node/stream_source.rs +++ b/src/frontend/src/optimizer/plan_node/stream_source.rs @@ -23,6 +23,7 @@ use risingwave_pb::stream_plan::{PbStreamSource, SourceNode}; use super::utils::{childless_record, Distill}; use super::{generic, ExprRewritable, PlanBase, StreamNode}; use crate::catalog::source_catalog::SourceCatalog; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::utils::column_names_pretty; use crate::optimizer::property::Distribution; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -31,23 +32,23 @@ use crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamSource { pub base: PlanBase, - logical: generic::Source, + pub(crate) core: generic::Source, } impl StreamSource { - pub fn new(logical: generic::Source) -> Self { - let base = PlanBase::new_stream_with_logical( - &logical, + pub fn new(core: generic::Source) -> Self { + let base = PlanBase::new_stream_with_core( + &core, Distribution::SomeShard, - logical.catalog.as_ref().map_or(true, |s| s.append_only), + core.catalog.as_ref().map_or(true, |s| s.append_only), false, - FixedBitSet::with_capacity(logical.column_catalog.len()), + FixedBitSet::with_capacity(core.column_catalog.len()), ); - Self { base, logical } + Self { base, core } } pub fn source_catalog(&self) -> Option> { - self.logical.catalog.clone() + self.core.catalog.clone() } } @@ -78,14 +79,20 @@ impl StreamNode for StreamSource { .to_internal_table_prost(), ), info: Some(source_catalog.info.clone()), - row_id_index: self.logical.row_id_index.map(|index| index as _), + row_id_index: self.core.row_id_index.map(|index| index as _), columns: self - .logical + .core .column_catalog .iter() .map(|c| c.to_protobuf()) .collect_vec(), properties: source_catalog.properties.clone().into_iter().collect(), + rate_limit: self + .base + .ctx() + .session_ctx() + .config() + .get_streaming_rate_limit(), }); 
PbNodeBody::Source(SourceNode { source_inner }) } diff --git a/src/frontend/src/optimizer/plan_node/stream_stateless_simple_agg.rs b/src/frontend/src/optimizer/plan_node/stream_stateless_simple_agg.rs index b69ff2b518cbd..474582ec877c7 100644 --- a/src/frontend/src/optimizer/plan_node/stream_stateless_simple_agg.rs +++ b/src/frontend/src/optimizer/plan_node/stream_stateless_simple_agg.rs @@ -20,7 +20,7 @@ use super::generic::{self, PlanAggCall}; use super::utils::impl_distill_by_unit; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::ExprRewriter; -use crate::optimizer::plan_node::stream::StreamPlanRef; +use crate::optimizer::plan_node::generic::PhysicalPlanRef; use crate::optimizer::property::RequiredDist; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -33,52 +33,48 @@ use crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamStatelessSimpleAgg { pub base: PlanBase, - logical: generic::Agg, + core: generic::Agg, } impl StreamStatelessSimpleAgg { - pub fn new(logical: generic::Agg) -> Self { - let input = logical.input.clone(); + pub fn new(core: generic::Agg) -> Self { + let input = core.input.clone(); let input_dist = input.distribution(); debug_assert!(input_dist.satisfies(&RequiredDist::AnyShard)); - let mut watermark_columns = FixedBitSet::with_capacity(logical.output_len()); + let mut watermark_columns = FixedBitSet::with_capacity(core.output_len()); // Watermark column(s) must be in group key. 
- for (idx, input_idx) in logical.group_key.indices().enumerate() { + for (idx, input_idx) in core.group_key.indices().enumerate() { if input.watermark_columns().contains(input_idx) { watermark_columns.insert(idx); } } - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, input_dist.clone(), input.append_only(), input.emit_on_window_close(), watermark_columns, ); - StreamStatelessSimpleAgg { base, logical } + StreamStatelessSimpleAgg { base, core } } pub fn agg_calls(&self) -> &[PlanAggCall] { - &self.logical.agg_calls + &self.core.agg_calls } } -impl_distill_by_unit!( - StreamStatelessSimpleAgg, - logical, - "StreamStatelessSimpleAgg" -); +impl_distill_by_unit!(StreamStatelessSimpleAgg, core, "StreamStatelessSimpleAgg"); impl PlanTreeNodeUnary for StreamStatelessSimpleAgg { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new(logical) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } impl_plan_tree_node_for_unary! 
{ StreamStatelessSimpleAgg } @@ -100,7 +96,7 @@ impl StreamNode for StreamStatelessSimpleAgg { .map(|idx| *idx as u32) .collect_vec(), agg_call_states: vec![], - result_table: None, + intermediate_state_table: None, is_append_only: self.input().append_only(), distinct_dedup_tables: Default::default(), }) @@ -113,8 +109,8 @@ impl ExprRewritable for StreamStatelessSimpleAgg { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_table_scan.rs b/src/frontend/src/optimizer/plan_node/stream_table_scan.rs index 51f61e0f663b8..965ca217a3369 100644 --- a/src/frontend/src/optimizer/plan_node/stream_table_scan.rs +++ b/src/frontend/src/optimizer/plan_node/stream_table_scan.rs @@ -24,15 +24,17 @@ use risingwave_common::util::sort_util::OrderType; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::{ChainType, PbStreamNode}; +use super::generic::PhysicalPlanRef; use super::utils::{childless_record, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanNodeId, PlanRef, StreamNode}; use crate::catalog::ColumnId; use crate::expr::{ExprRewriter, FunctionCall}; use crate::optimizer::plan_node::generic::GenericPlanRef; +use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::utils::{IndicesDisplay, TableCatalogBuilder}; use crate::optimizer::property::{Distribution, DistributionDisplay}; use crate::stream_fragmenter::BuildFragmentGraphState; -use crate::TableCatalog; +use crate::{Explain, TableCatalog}; /// `StreamTableScan` is a virtual plan node to represent a stream table scan. 
It will be converted /// to chain + merge node (for upstream materialize) + batch table scan when converting to `MView` @@ -40,56 +42,53 @@ use crate::TableCatalog; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamTableScan { pub base: PlanBase, - logical: generic::Scan, + core: generic::Scan, batch_plan_id: PlanNodeId, chain_type: ChainType, } impl StreamTableScan { - pub fn new(logical: generic::Scan) -> Self { - Self::new_with_chain_type(logical, ChainType::Backfill) + pub fn new(core: generic::Scan) -> Self { + Self::new_with_chain_type(core, ChainType::Backfill) } - pub fn new_with_chain_type(logical: generic::Scan, chain_type: ChainType) -> Self { - let batch_plan_id = logical.ctx.next_plan_node_id(); + pub fn new_with_chain_type(core: generic::Scan, chain_type: ChainType) -> Self { + let batch_plan_id = core.ctx.next_plan_node_id(); let distribution = { - match logical.distribution_key() { + match core.distribution_key() { Some(distribution_key) => { if distribution_key.is_empty() { Distribution::Single } else { // See also `BatchSeqScan::clone_with_dist`. 
- Distribution::UpstreamHashShard( - distribution_key, - logical.table_desc.table_id, - ) + Distribution::UpstreamHashShard(distribution_key, core.table_desc.table_id) } } None => Distribution::SomeShard, } }; - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, distribution, - logical.table_desc.append_only, + core.table_desc.append_only, false, - logical.watermark_columns(), + core.watermark_columns(), ); Self { base, - logical, + core, batch_plan_id, chain_type, } } pub fn table_name(&self) -> &str { - &self.logical.table_name + &self.core.table_name } - pub fn logical(&self) -> &generic::Scan { - &self.logical + pub fn core(&self) -> &generic::Scan { + &self.core } pub fn to_index_scan( @@ -100,7 +99,7 @@ impl StreamTableScan { function_mapping: &HashMap, chain_type: ChainType, ) -> StreamTableScan { - let logical_index_scan = self.logical.to_index_scan( + let logical_index_scan = self.core.to_index_scan( index_name, index_table_desc, primary_to_secondary_mapping, @@ -118,24 +117,42 @@ impl StreamTableScan { /// Build catalog for backfill state /// - /// Schema - /// ------ - /// | vnode | pk | `backfill_finished` | + /// Schema: | vnode | pk ... | `backfill_finished` | `row_count` | /// - /// key: | vnode | - /// value: | pk | `backfill_finished` + /// key: | vnode | + /// value: | pk ... | `backfill_finished` | `row_count` | /// /// When we update the backfill progress, /// we update it for all vnodes. - /// "pk" here might be confusing. It refers to the - /// upstream pk which we use to track the backfill progress. + /// + /// `pk` refers to the upstream pk which we use to track the backfill progress. + /// + /// `vnode` is the corresponding vnode of the upstream's distribution key. + /// It should also match the vnode of the backfill executor. + /// + /// `backfill_finished` is a boolean which just indicates if backfill is done. 
+ /// + /// `row_count` is a count of rows which indicates the # of rows per executor. + /// We used to track this in memory. + /// But for backfill persistence we have to also persist it. + /// + /// FIXME(kwannoel): + /// - Across all vnodes, the values are the same. + /// - e.g. | vnode | pk ... | `backfill_finished` | `row_count` | + /// | 1002 | Int64(1) | t | 10 | + /// | 1003 | Int64(1) | t | 10 | + /// | 1003 | Int64(1) | t | 10 | + /// Eventually we should track progress per vnode, to support scaling with both mview and + /// the corresponding `no_shuffle_backfill`. + /// However this is not high priority, since we are working on supporting arrangement backfill, + /// which already has this capability. pub fn build_backfill_state_catalog( &self, state: &mut BuildFragmentGraphState, ) -> TableCatalog { let properties = self.ctx().with_options().internal_table_subset(); let mut catalog_builder = TableCatalogBuilder::new(properties); - let upstream_schema = &self.logical.table_desc.columns; + let upstream_schema = &self.core.table_desc.columns; // We use vnode as primary key in state table. // If `Distribution::Single`, vnode will just be `VirtualNode::default()`. @@ -143,7 +160,7 @@ impl StreamTableScan { catalog_builder.add_order_column(0, OrderType::ascending()); // pk columns - for col_order in self.logical.primary_key() { + for col_order in self.core.primary_key() { let col = &upstream_schema[col_order.column_index]; catalog_builder.add_column(&Field::from(col)); } @@ -154,6 +171,12 @@ impl StreamTableScan { format!("{}_backfill_finished", self.table_name()), )); + // `row_count` column + catalog_builder.add_column(&Field::with_name( + DataType::Int64, + format!("{}_row_count", self.table_name()), + )); + // Reuse the state store pk (vnode) as the vnode as well. catalog_builder.set_vnode_col_idx(0); catalog_builder.set_dist_key_in_pk(vec![0]); @@ -171,20 +194,20 @@ impl_plan_tree_node_for_leaf! 
{ StreamTableScan } impl Distill for StreamTableScan { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(4); - vec.push(("table", Pretty::from(self.logical.table_name.clone()))); - vec.push(("columns", self.logical.columns_pretty(verbose))); + vec.push(("table", Pretty::from(self.core.table_name.clone()))); + vec.push(("columns", self.core.columns_pretty(verbose))); if verbose { let pk = IndicesDisplay { - indices: self.logical_pk(), - schema: &self.base.schema, + indices: self.stream_key().unwrap_or_default(), + schema: self.base.schema(), }; vec.push(("pk", pk.distill())); let dist = Pretty::display(&DistributionDisplay { distribution: self.distribution(), - input_schema: &self.base.schema, + input_schema: self.base.schema(), }); vec.push(("dist", dist)); } @@ -203,14 +226,24 @@ impl StreamTableScan { pub fn adhoc_to_stream_prost(&self, state: &mut BuildFragmentGraphState) -> PbStreamNode { use risingwave_pb::stream_plan::*; - let stream_key = self.base.logical_pk.iter().map(|x| *x as u32).collect_vec(); + let stream_key = self + .stream_key() + .unwrap_or_else(|| { + panic!( + "should always have a stream key in the stream plan but not, sub plan: {}", + PlanRef::from(self.clone()).explain_to_string() + ) + }) + .iter() + .map(|x| *x as u32) + .collect_vec(); // The required columns from the table (both scan and upstream). let upstream_column_ids = match self.chain_type { // For backfill, we additionally need the primary key columns. 
- ChainType::Backfill => self.logical.output_and_pk_column_ids(), + ChainType::Backfill => self.core.output_and_pk_column_ids(), ChainType::Chain | ChainType::Rearrange | ChainType::UpstreamOnly => { - self.logical.output_column_ids() + self.core.output_column_ids() } ChainType::ChainUnspecified => unreachable!(), } @@ -223,7 +256,7 @@ impl StreamTableScan { .iter() .map(|&id| { let col = self - .logical + .core .table_desc .columns .iter() @@ -234,7 +267,7 @@ impl StreamTableScan { .collect_vec(); let output_indices = self - .logical + .core .output_column_ids() .iter() .map(|i| { @@ -247,7 +280,7 @@ impl StreamTableScan { // TODO: snapshot read of upstream mview let batch_plan_node = BatchPlanNode { - table_desc: Some(self.logical.table_desc.to_protobuf()), + table_desc: Some(self.core.table_desc.to_protobuf()), column_ids: upstream_column_ids.clone(), }; @@ -277,13 +310,13 @@ impl StreamTableScan { }, ], node_body: Some(PbNodeBody::Chain(ChainNode { - table_id: self.logical.table_desc.table_id.table_id, + table_id: self.core.table_desc.table_id.table_id, chain_type: self.chain_type as i32, // The column indices need to be forwarded to the downstream output_indices, upstream_column_ids, // The table desc used by backfill executor - table_desc: Some(self.logical.table_desc.to_protobuf()), + table_desc: Some(self.core.table_desc.to_protobuf()), state_table: Some(catalog), rate_limit: self .base @@ -291,9 +324,10 @@ impl StreamTableScan { .session_ctx() .config() .get_streaming_rate_limit(), + ..Default::default() })), stream_key, - operator_id: self.base.id.0 as u64, + operator_id: self.base.id().0 as u64, identity: { let s = self.distill_to_string(); s.replace("StreamTableScan", "Chain") @@ -309,8 +343,8 @@ impl ExprRewritable for StreamTableScan { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new_with_chain_type(logical, self.chain_type).into() + let mut core = 
self.core.clone(); + core.rewrite_exprs(r); + Self::new_with_chain_type(core, self.chain_type).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_temporal_join.rs b/src/frontend/src/optimizer/plan_node/stream_temporal_join.rs index f9fb325b8af8b..675dbeb9ab381 100644 --- a/src/frontend/src/optimizer/plan_node/stream_temporal_join.rs +++ b/src/frontend/src/optimizer/plan_node/stream_temporal_join.rs @@ -18,11 +18,12 @@ use risingwave_pb::plan_common::JoinType; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::TemporalJoinNode; +use super::generic::GenericPlanRef; +use super::stream::StreamPlanRef; use super::utils::{childless_record, watermark_pretty, Distill}; use super::{generic, ExprRewritable, PlanBase, PlanRef, PlanTreeNodeBinary, StreamNode}; use crate::expr::{Expr, ExprRewriter}; use crate::optimizer::plan_node::plan_tree_node::PlanTreeNodeUnary; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::utils::IndicesDisplay; use crate::optimizer::plan_node::{ EqJoinPredicate, EqJoinPredicateDisplay, StreamExchange, StreamTableScan, @@ -33,15 +34,15 @@ use crate::utils::ColIndexMappingRewriteExt; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamTemporalJoin { pub base: PlanBase, - logical: generic::Join, + core: generic::Join, eq_join_predicate: EqJoinPredicate, } impl StreamTemporalJoin { - pub fn new(logical: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { - assert!(logical.join_type == JoinType::Inner || logical.join_type == JoinType::LeftOuter); - assert!(logical.left.append_only()); - let right = logical.right.clone(); + pub fn new(core: generic::Join, eq_join_predicate: EqJoinPredicate) -> Self { + assert!(core.join_type == JoinType::Inner || core.join_type == JoinType::LeftOuter); + assert!(core.left.append_only()); + let right = core.right.clone(); let exchange: &StreamExchange = right .as_stream_exchange() .expect("should be a no shuffle 
stream exchange"); @@ -50,22 +51,20 @@ impl StreamTemporalJoin { let scan: &StreamTableScan = exchange_input .as_stream_table_scan() .expect("should be a stream table scan"); - assert!(scan.logical().for_system_time_as_of_proctime); + assert!(scan.core().for_system_time_as_of_proctime); - let l2o = logical - .l2i_col_mapping() - .composite(&logical.i2o_col_mapping()); - let dist = l2o.rewrite_provided_distribution(logical.left.distribution()); + let l2o = core.l2i_col_mapping().composite(&core.i2o_col_mapping()); + let dist = l2o.rewrite_provided_distribution(core.left.distribution()); // Use left side watermark directly. - let watermark_columns = logical.i2o_col_mapping().rewrite_bitset( - &logical + let watermark_columns = core.i2o_col_mapping().rewrite_bitset( + &core .l2i_col_mapping() - .rewrite_bitset(logical.left.watermark_columns()), + .rewrite_bitset(core.left.watermark_columns()), ); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, true, false, // TODO(rc): derive EOWC property from input @@ -74,14 +73,14 @@ impl StreamTemporalJoin { Self { base, - logical, + core, eq_join_predicate, } } /// Get join type pub fn join_type(&self) -> JoinType { - self.logical.join_type + self.core.join_type } pub fn eq_join_predicate(&self) -> &EqJoinPredicate { @@ -91,11 +90,11 @@ impl StreamTemporalJoin { impl Distill for StreamTemporalJoin { fn distill<'a>(&self) -> XmlNode<'a> { - let verbose = self.base.ctx.is_explain_verbose(); + let verbose = self.base.ctx().is_explain_verbose(); let mut vec = Vec::with_capacity(if verbose { 3 } else { 2 }); - vec.push(("type", Pretty::debug(&self.logical.join_type))); + vec.push(("type", Pretty::debug(&self.core.join_type))); - let concat_schema = self.logical.concat_schema(); + let concat_schema = self.core.concat_schema(); vec.push(( "predicate", Pretty::debug(&EqJoinPredicateDisplay { @@ -104,12 +103,12 @@ impl Distill for StreamTemporalJoin { }), )); - if let 
Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { vec.push(("output_watermarks", ow)); } if verbose { - let data = IndicesDisplay::from_join(&self.logical, &concat_schema); + let data = IndicesDisplay::from_join(&self.core, &concat_schema); vec.push(("output", data)); } @@ -119,18 +118,18 @@ impl Distill for StreamTemporalJoin { impl PlanTreeNodeBinary for StreamTemporalJoin { fn left(&self) -> PlanRef { - self.logical.left.clone() + self.core.left.clone() } fn right(&self) -> PlanRef { - self.logical.right.clone() + self.core.right.clone() } fn clone_with_left_right(&self, left: PlanRef, right: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.left = left; - logical.right = right; - Self::new(logical, self.eq_join_predicate.clone()) + let mut core = self.core.clone(); + core.left = left; + core.right = right; + Self::new(core, self.eq_join_predicate.clone()) } } @@ -156,7 +155,7 @@ impl StreamNode for StreamTemporalJoin { .expect("should be a stream table scan"); NodeBody::TemporalJoin(TemporalJoinNode { - join_type: self.logical.join_type as i32, + join_type: self.core.join_type as i32, left_key: left_jk_indices_prost, right_key: right_jk_indices_prost, null_safe: null_safe_prost, @@ -165,19 +164,9 @@ impl StreamNode for StreamTemporalJoin { .other_cond() .as_expr_unless_true() .map(|x| x.to_expr_proto()), - output_indices: self - .logical - .output_indices - .iter() - .map(|&x| x as u32) - .collect(), - table_desc: Some(scan.logical().table_desc.to_protobuf()), - table_output_indices: scan - .logical() - .output_col_idx - .iter() - .map(|&i| i as _) - .collect(), + output_indices: self.core.output_indices.iter().map(|&x| x as u32).collect(), + table_desc: Some(scan.core().table_desc.to_protobuf()), + table_output_indices: scan.core().output_col_idx.iter().map(|&i| i as _).collect(), }) } } @@ -188,8 +177,8 @@ impl ExprRewritable for 
StreamTemporalJoin { } fn rewrite_exprs(&self, r: &mut dyn ExprRewriter) -> PlanRef { - let mut logical = self.logical.clone(); - logical.rewrite_exprs(r); - Self::new(logical, self.eq_join_predicate.rewrite_exprs(r)).into() + let mut core = self.core.clone(); + core.rewrite_exprs(r); + Self::new(core, self.eq_join_predicate.rewrite_exprs(r)).into() } } diff --git a/src/frontend/src/optimizer/plan_node/stream_topn.rs b/src/frontend/src/optimizer/plan_node/stream_topn.rs index 83ab3820bd376..87890625f6be7 100644 --- a/src/frontend/src/optimizer/plan_node/stream_topn.rs +++ b/src/frontend/src/optimizer/plan_node/stream_topn.rs @@ -26,46 +26,34 @@ use crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamTopN { pub base: PlanBase, - logical: generic::TopN, + core: generic::TopN, } impl StreamTopN { - fn new_inner(logical: generic::TopN, stream_key: Option>) -> Self { - assert!(logical.group_key.is_empty()); - assert!(logical.limit_attr.limit() > 0); - let input = &logical.input; + pub fn new(core: generic::TopN) -> Self { + assert!(core.group_key.is_empty()); + assert!(core.limit_attr.limit() > 0); + let input = &core.input; let dist = match input.distribution() { Distribution::Single => Distribution::Single, _ => panic!(), }; let watermark_columns = FixedBitSet::with_capacity(input.schema().len()); - let mut base = - PlanBase::new_stream_with_logical(&logical, dist, false, false, watermark_columns); - if let Some(stream_key) = stream_key { - base.logical_pk = stream_key; - } - StreamTopN { base, logical } - } - - pub fn new(logical: generic::TopN) -> Self { - Self::new_inner(logical, None) - } - - pub fn with_stream_key(logical: generic::TopN, stream_key: Vec) -> Self { - Self::new_inner(logical, Some(stream_key)) + let base = PlanBase::new_stream_with_core(&core, dist, false, false, watermark_columns); + StreamTopN { base, core } } pub fn limit_attr(&self) -> TopNLimit { - self.logical.limit_attr + 
self.core.limit_attr } pub fn offset(&self) -> u64 { - self.logical.offset + self.core.offset } pub fn topn_order(&self) -> &Order { - &self.logical.order + &self.core.order } } @@ -74,19 +62,19 @@ impl Distill for StreamTopN { let name = plan_node_name!("StreamTopN", { "append_only", self.input().append_only() }, ); - self.logical.distill_with_name(name) + self.core.distill_with_name(name) } } impl PlanTreeNodeUnary for StreamTopN { fn input(&self) -> PlanRef { - self.logical.input.clone() + self.core.input.clone() } fn clone_with_input(&self, input: PlanRef) -> Self { - let mut logical = self.logical.clone(); - logical.input = input; - Self::new_inner(logical, Some(self.logical_pk().to_vec())) + let mut core = self.core.clone(); + core.input = input; + Self::new(core) } } @@ -102,11 +90,11 @@ impl StreamNode for StreamTopN { offset: self.offset(), with_ties: self.limit_attr().with_ties(), table: Some( - self.logical + self.core .infer_internal_table_catalog( input.schema(), input.ctx(), - input.logical_pk(), + input.expect_stream_key(), None, ) .with_id(state.gen_table_id_wrapped()) diff --git a/src/frontend/src/optimizer/plan_node/stream_union.rs b/src/frontend/src/optimizer/plan_node/stream_union.rs index 74e8dbcf5c48e..6d6dca2d8dd02 100644 --- a/src/frontend/src/optimizer/plan_node/stream_union.rs +++ b/src/frontend/src/optimizer/plan_node/stream_union.rs @@ -19,10 +19,11 @@ use pretty_xmlish::XmlNode; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::UnionNode; +use super::generic::GenericPlanRef; +use super::stream::StreamPlanRef; use super::utils::{childless_record, watermark_pretty, Distill}; use super::{generic, ExprRewritable, PlanRef}; use crate::optimizer::plan_node::generic::GenericPlanNode; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::plan_node::{PlanBase, PlanTreeNode, StreamNode}; use crate::stream_fragmenter::BuildFragmentGraphState; @@ -30,38 +31,38 @@ use 
crate::stream_fragmenter::BuildFragmentGraphState; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StreamUnion { pub base: PlanBase, - logical: generic::Union, + core: generic::Union, } impl StreamUnion { - pub fn new(logical: generic::Union) -> Self { - let inputs = &logical.inputs; + pub fn new(core: generic::Union) -> Self { + let inputs = &core.inputs; let dist = inputs[0].distribution().clone(); assert!(inputs.iter().all(|input| *input.distribution() == dist)); let watermark_columns = inputs.iter().fold( { - let mut bitset = FixedBitSet::with_capacity(logical.schema().len()); + let mut bitset = FixedBitSet::with_capacity(core.schema().len()); bitset.toggle_range(..); bitset }, |acc_watermark_columns, input| acc_watermark_columns.bitand(input.watermark_columns()), ); - let base = PlanBase::new_stream_with_logical( - &logical, + let base = PlanBase::new_stream_with_core( + &core, dist, inputs.iter().all(|x| x.append_only()), inputs.iter().all(|x| x.emit_on_window_close()), watermark_columns, ); - StreamUnion { base, logical } + StreamUnion { base, core } } } impl Distill for StreamUnion { fn distill<'a>(&self) -> XmlNode<'a> { - let mut vec = self.logical.fields_pretty(); - if let Some(ow) = watermark_pretty(&self.base.watermark_columns, self.schema()) { + let mut vec = self.core.fields_pretty(); + if let Some(ow) = watermark_pretty(self.base.watermark_columns(), self.schema()) { vec.push(("output_watermarks", ow)); } childless_record("StreamUnion", vec) @@ -70,11 +71,11 @@ impl Distill for StreamUnion { impl PlanTreeNode for StreamUnion { fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { - smallvec::SmallVec::from_vec(self.logical.inputs.clone()) + smallvec::SmallVec::from_vec(self.core.inputs.clone()) } fn clone_with_inputs(&self, inputs: &[crate::optimizer::PlanRef]) -> PlanRef { - let mut new = self.logical.clone(); + let mut new = self.core.clone(); new.inputs = inputs.to_vec(); Self::new(new).into() } diff --git 
a/src/frontend/src/optimizer/plan_node/stream_values.rs b/src/frontend/src/optimizer/plan_node/stream_values.rs index f4ad303cdf2b6..f8cc5db851159 100644 --- a/src/frontend/src/optimizer/plan_node/stream_values.rs +++ b/src/frontend/src/optimizer/plan_node/stream_values.rs @@ -18,6 +18,7 @@ use risingwave_pb::stream_plan::stream_node::NodeBody as ProstStreamNode; use risingwave_pb::stream_plan::values_node::ExprTuple; use risingwave_pb::stream_plan::ValuesNode; +use super::generic::GenericPlanRef; use super::utils::{childless_record, Distill}; use super::{ExprRewritable, LogicalValues, PlanBase, StreamNode}; use crate::expr::{Expr, ExprImpl}; @@ -40,7 +41,7 @@ impl StreamValues { let base = PlanBase::new_stream( ctx, logical.schema().clone(), - logical.logical_pk().to_vec(), + logical.stream_key().map(|v| v.to_vec()), logical.functional_dependency().clone(), Distribution::Single, true, diff --git a/src/frontend/src/optimizer/plan_node/stream_watermark_filter.rs b/src/frontend/src/optimizer/plan_node/stream_watermark_filter.rs index 0ca939a207926..066bc9a234ca5 100644 --- a/src/frontend/src/optimizer/plan_node/stream_watermark_filter.rs +++ b/src/frontend/src/optimizer/plan_node/stream_watermark_filter.rs @@ -21,6 +21,7 @@ use risingwave_common::util::sort_util::OrderType; use risingwave_pb::catalog::WatermarkDesc; use risingwave_pb::stream_plan::stream_node::PbNodeBody; +use super::stream::StreamPlanRef; use super::utils::{childless_record, watermark_pretty, Distill, TableCatalogBuilder}; use super::{ExprRewritable, PlanBase, PlanRef, PlanTreeNodeUnary, StreamNode}; use crate::expr::{ExprDisplay, ExprImpl}; @@ -43,7 +44,7 @@ impl StreamWatermarkFilter { let base = PlanBase::new_stream( input.ctx(), input.schema().clone(), - input.logical_pk().to_vec(), + input.stream_key().map(|v| v.to_vec()), input.functional_dependency().clone(), input.distribution().clone(), input.append_only(), @@ -85,7 +86,7 @@ impl Distill for StreamWatermarkFilter { }) .collect(); let 
display_output_watermarks = - watermark_pretty(&self.base.watermark_columns, input_schema).unwrap(); + watermark_pretty(self.base.watermark_columns(), input_schema).unwrap(); let fields = vec![ ("watermark_descs", Pretty::Array(display_watermark_descs)), ("output_watermarks", display_output_watermarks), diff --git a/src/frontend/src/optimizer/plan_node/utils.rs b/src/frontend/src/optimizer/plan_node/utils.rs index 475c5c0e32eb1..f167d73c53a46 100644 --- a/src/frontend/src/optimizer/plan_node/utils.rs +++ b/src/frontend/src/optimizer/plan_node/utils.rs @@ -23,7 +23,7 @@ use risingwave_common::catalog::{ }; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use crate::catalog::table_catalog::TableType; +use crate::catalog::table_catalog::{CreateType, TableType}; use crate::catalog::{ColumnId, FragmentId, TableCatalog, TableId}; use crate::optimizer::property::Cardinality; use crate::utils::WithOptions; @@ -177,6 +177,9 @@ impl TableCatalogBuilder { created_at_epoch: None, initialized_at_epoch: None, cleaned_by_watermark: false, + // NOTE(kwannoel): This may not match the create type of the materialized table. + // It should be ignored for internal tables. 
+ create_type: CreateType::Foreground, } } diff --git a/src/frontend/src/optimizer/plan_rewriter/plan_cloner.rs b/src/frontend/src/optimizer/plan_rewriter/plan_cloner.rs index f30f3d9fa4966..7e53b903ac962 100644 --- a/src/frontend/src/optimizer/plan_rewriter/plan_cloner.rs +++ b/src/frontend/src/optimizer/plan_rewriter/plan_cloner.rs @@ -16,6 +16,7 @@ use std::collections::HashMap; use itertools::Itertools; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{LogicalShare, PlanNodeId, PlanTreeNode, StreamShare}; use crate::optimizer::PlanRewriter; use crate::PlanRef; diff --git a/src/frontend/src/optimizer/plan_rewriter/share_source_rewriter.rs b/src/frontend/src/optimizer/plan_rewriter/share_source_rewriter.rs index db7ecd92c7aa5..5b9efb9fc7c94 100644 --- a/src/frontend/src/optimizer/plan_rewriter/share_source_rewriter.rs +++ b/src/frontend/src/optimizer/plan_rewriter/share_source_rewriter.rs @@ -17,6 +17,7 @@ use std::collections::{HashMap, HashSet}; use itertools::Itertools; use crate::catalog::SourceId; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{ LogicalShare, LogicalSource, PlanNodeId, PlanTreeNode, StreamShare, }; @@ -109,8 +110,10 @@ impl PlanRewriter for ShareSourceRewriter { } } -impl PlanVisitor<()> for SourceCounter { - type DefaultBehavior = impl DefaultBehavior<()>; +impl PlanVisitor for SourceCounter { + type Result = (); + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { DefaultValue diff --git a/src/frontend/src/optimizer/plan_visitor/cardinality_visitor.rs b/src/frontend/src/optimizer/plan_visitor/cardinality_visitor.rs index e563dcf2b41c3..a9eb5639fcf85 100644 --- a/src/frontend/src/optimizer/plan_visitor/cardinality_visitor.rs +++ b/src/frontend/src/optimizer/plan_visitor/cardinality_visitor.rs @@ -18,7 +18,6 @@ use std::ops::{Mul, Sub}; use risingwave_pb::plan_common::JoinType; use super::{DefaultBehavior, 
DefaultValue, PlanVisitor}; -use crate::catalog::system_catalog::pg_catalog::PG_NAMESPACE_TABLE_NAME; use crate::optimizer::plan_node::generic::TopNLimit; use crate::optimizer::plan_node::{ self, PlanNode, PlanTreeNode, PlanTreeNodeBinary, PlanTreeNodeUnary, @@ -36,26 +35,45 @@ impl CardinalityVisitor { input_card: Cardinality, eq_set: HashSet, ) -> Cardinality { - let mut unique_keys: Vec> = vec![input.logical_pk().iter().copied().collect()]; - + let mut unique_keys: Vec> = if let Some(stream_key) = input.stream_key() { + vec![stream_key.iter().copied().collect()] + } else { + vec![] + }; // We don't have UNIQUE key now. So we hack here to support some complex queries on // system tables. - // TODO(card): remove this after we have UNIQUE key. - if let Some(scan) = input.as_logical_scan() - && scan.is_sys_table() - && scan.table_name() == PG_NAMESPACE_TABLE_NAME + // TODO(card): remove this after we have UNIQUE key. https://github.com/risingwavelabs/risingwave/issues/12514 { - if let Some(nspname) = scan - .output_col_idx() - .iter() - .find(|i| scan.table_desc().columns[**i].name == "nspname") { - unique_keys.push([*nspname].into_iter().collect()); + // Hack for unique key `nspname` on `pg_catalog.pg_namespace` + // + // LogicalFilter { predicate: (rw_schemas.name = ...) 
} + // (below is expanded logical view, see src/frontend/src/catalog/system_catalog/pg_catalog/pg_namespace.rs) + // └─LogicalProject { exprs: [rw_schemas.id, rw_schemas.name, rw_schemas.owner, rw_schemas.acl] } + // └─LogicalScan { table: rw_schemas, columns: [id, name, owner, acl] } + fn try_get_unique_key_from_pg_namespace(plan: &dyn PlanNode) -> Option> { + let proj = plan.as_logical_project()?; + if !proj.is_identity() { + return None; + } + let scan = proj.input(); + let scan = scan.as_logical_scan()?; + if scan.is_sys_table() && scan.table_name() == "rw_schemas" { + if let Some(name) = scan + .output_col_idx() + .iter() + .find(|i| scan.table_desc().columns[**i].name == "name") + { + return Some([*name].into_iter().collect()); + } + } + None + } + if let Some(unique_key) = try_get_unique_key_from_pg_namespace(input) { + unique_keys.push(unique_key); } } - if unique_keys .iter() - .filter(|unique_key| !unique_key.is_empty()) .any(|unique_key| eq_set.is_superset(unique_key)) { input_card.min(0..=1) @@ -65,8 +83,10 @@ impl CardinalityVisitor { } } -impl PlanVisitor for CardinalityVisitor { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for CardinalityVisitor { + type Result = Cardinality; + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { // returns unknown cardinality for default behavior, which is always correct diff --git a/src/frontend/src/optimizer/plan_visitor/execution_mode_decider.rs b/src/frontend/src/optimizer/plan_visitor/execution_mode_decider.rs index f1f3d4edfdc74..6eb7f7def64aa 100644 --- a/src/frontend/src/optimizer/plan_visitor/execution_mode_decider.rs +++ b/src/frontend/src/optimizer/plan_visitor/execution_mode_decider.rs @@ -28,8 +28,10 @@ impl ExecutionModeDecider { } } -impl PlanVisitor for ExecutionModeDecider { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for ExecutionModeDecider { + type Result = bool; + + type DefaultBehavior = impl DefaultBehavior; fn 
default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a & b) diff --git a/src/frontend/src/optimizer/plan_visitor/input_ref_validator.rs b/src/frontend/src/optimizer/plan_visitor/input_ref_validator.rs index 48bf5bd7b640c..6911c6e8ce89a 100644 --- a/src/frontend/src/optimizer/plan_visitor/input_ref_validator.rs +++ b/src/frontend/src/optimizer/plan_visitor/input_ref_validator.rs @@ -25,7 +25,9 @@ struct ExprVis<'a> { schema: &'a Schema, } -impl ExprVisitor> for ExprVis<'_> { +impl ExprVisitor for ExprVis<'_> { + type Result = Option; + fn visit_input_ref(&mut self, input_ref: &crate::expr::InputRef) -> Option { if input_ref.data_type != self.schema[input_ref.index].data_type { Some(format!( @@ -101,8 +103,10 @@ macro_rules! visit_project { }; } -impl PlanVisitor> for InputRefValidator { - type DefaultBehavior = impl DefaultBehavior>; +impl PlanVisitor for InputRefValidator { + type Result = Option; + + type DefaultBehavior = impl DefaultBehavior; visit_filter!(logical, batch, stream); diff --git a/src/frontend/src/optimizer/plan_visitor/max_one_row_visitor.rs b/src/frontend/src/optimizer/plan_visitor/max_one_row_visitor.rs index 32a6c874efe40..2a3a354674621 100644 --- a/src/frontend/src/optimizer/plan_visitor/max_one_row_visitor.rs +++ b/src/frontend/src/optimizer/plan_visitor/max_one_row_visitor.rs @@ -18,8 +18,10 @@ use crate::optimizer::plan_visitor::PlanVisitor; pub struct HasMaxOneRowApply(); -impl PlanVisitor for HasMaxOneRowApply { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for HasMaxOneRowApply { + type Result = bool; + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a | b) diff --git a/src/frontend/src/optimizer/plan_visitor/mod.rs b/src/frontend/src/optimizer/plan_visitor/mod.rs index 7695128f255d4..4f8d5e0c2ade7 100644 --- a/src/frontend/src/optimizer/plan_visitor/mod.rs +++ b/src/frontend/src/optimizer/plan_visitor/mod.rs @@ -80,14 +80,15 @@ macro_rules! 
def_visitor { ($({ $convention:ident, $name:ident }),*) => { /// The visitor for plan nodes. visit all inputs and return the ret value of the left most input, /// and leaf node returns `R::default()` - pub trait PlanVisitor { - type DefaultBehavior: DefaultBehavior; + pub trait PlanVisitor { + type Result: Default; + type DefaultBehavior: DefaultBehavior; /// The behavior for the default implementations of `visit_xxx`. fn default_behavior() -> Self::DefaultBehavior; paste! { - fn visit(&mut self, plan: PlanRef) -> R{ + fn visit(&mut self, plan: PlanRef) -> Self::Result { match plan.node_type() { $( PlanNodeType::[<$convention $name>] => self.[](plan.downcast_ref::<[<$convention $name>]>().unwrap()), @@ -97,7 +98,7 @@ macro_rules! def_visitor { $( #[doc = "Visit [`" [<$convention $name>] "`] , the function should visit the inputs."] - fn [](&mut self, plan: &[<$convention $name>]) -> R { + fn [](&mut self, plan: &[<$convention $name>]) -> Self::Result { let results = plan.inputs().into_iter().map(|input| self.visit(input)); Self::default_behavior().apply(results) } @@ -121,17 +122,18 @@ macro_rules! impl_has_variant { pred: P, } - impl

PlanVisitor for HasWhere

+ impl

PlanVisitor for HasWhere

where P: FnMut(&$variant) -> bool, { - type DefaultBehavior = impl DefaultBehavior; + type Result = bool; + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a | b) } - fn [](&mut self, node: &$variant) -> bool { + fn [](&mut self, node: &$variant) -> Self::Result { (self.pred)(node) } } diff --git a/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs b/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs index db3ef0b7acfa4..7dd32e29b98bb 100644 --- a/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs +++ b/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs @@ -40,11 +40,13 @@ impl PlanCorrelatedIdFinder { } } -impl PlanVisitor<()> for PlanCorrelatedIdFinder { +impl PlanVisitor for PlanCorrelatedIdFinder { /// `correlated_input_ref` can only appear in `LogicalProject`, `LogicalFilter`, /// `LogicalJoin` or the `filter` clause of `PlanAggCall` of `LogicalAgg` now. 
- type DefaultBehavior = impl DefaultBehavior<()>; + type Result = (); + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { DefaultValue @@ -129,7 +131,9 @@ impl ExprCorrelatedIdFinder { } } -impl ExprVisitor<()> for ExprCorrelatedIdFinder { +impl ExprVisitor for ExprCorrelatedIdFinder { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_correlated_input_ref(&mut self, correlated_input_ref: &CorrelatedInputRef) { diff --git a/src/frontend/src/optimizer/plan_visitor/relation_collector_visitor.rs b/src/frontend/src/optimizer/plan_visitor/relation_collector_visitor.rs index 45fd2fdb90b14..770f099aab529 100644 --- a/src/frontend/src/optimizer/plan_visitor/relation_collector_visitor.rs +++ b/src/frontend/src/optimizer/plan_visitor/relation_collector_visitor.rs @@ -42,16 +42,18 @@ impl RelationCollectorVisitor { } } -impl PlanVisitor<()> for RelationCollectorVisitor { - type DefaultBehavior = impl DefaultBehavior<()>; +impl PlanVisitor for RelationCollectorVisitor { + type Result = (); + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { DefaultValue } fn visit_batch_seq_scan(&mut self, plan: &crate::optimizer::plan_node::BatchSeqScan) { - if !plan.logical().is_sys_table { - self.relations.insert(plan.logical().table_desc.table_id); + if !plan.core().is_sys_table { + self.relations.insert(plan.core().table_desc.table_id); } } @@ -62,7 +64,7 @@ impl PlanVisitor<()> for RelationCollectorVisitor { } fn visit_stream_table_scan(&mut self, plan: &StreamTableScan) { - let logical = plan.logical(); + let logical = plan.core(); if !logical.is_sys_table { self.relations.insert(logical.table_desc.table_id); } diff --git a/src/frontend/src/optimizer/plan_visitor/share_parent_counter.rs b/src/frontend/src/optimizer/plan_visitor/share_parent_counter.rs index 2b21f9d806e72..7950b5d81a49c 100644 --- a/src/frontend/src/optimizer/plan_visitor/share_parent_counter.rs +++ 
b/src/frontend/src/optimizer/plan_visitor/share_parent_counter.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use super::{DefaultBehavior, DefaultValue}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{LogicalShare, PlanNodeId, PlanTreeNodeUnary}; use crate::optimizer::plan_visitor::PlanVisitor; @@ -33,8 +34,10 @@ impl ShareParentCounter { } } -impl PlanVisitor<()> for ShareParentCounter { - type DefaultBehavior = impl DefaultBehavior<()>; +impl PlanVisitor for ShareParentCounter { + type Result = (); + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { DefaultValue diff --git a/src/frontend/src/optimizer/plan_visitor/side_effect_visitor.rs b/src/frontend/src/optimizer/plan_visitor/side_effect_visitor.rs index b7e87e656d58e..f36561d8d4457 100644 --- a/src/frontend/src/optimizer/plan_visitor/side_effect_visitor.rs +++ b/src/frontend/src/optimizer/plan_visitor/side_effect_visitor.rs @@ -19,8 +19,10 @@ use crate::optimizer::plan_node; /// eliminated trivially. 
pub struct SideEffectVisitor; -impl PlanVisitor for SideEffectVisitor { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for SideEffectVisitor { + type Result = bool; + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a | b) diff --git a/src/frontend/src/optimizer/plan_visitor/sys_table_visitor.rs b/src/frontend/src/optimizer/plan_visitor/sys_table_visitor.rs index 7b5210ca68db7..dcbfb2d93d3f5 100644 --- a/src/frontend/src/optimizer/plan_visitor/sys_table_visitor.rs +++ b/src/frontend/src/optimizer/plan_visitor/sys_table_visitor.rs @@ -27,15 +27,17 @@ impl SysTableVisitor { } } -impl PlanVisitor for SysTableVisitor { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for SysTableVisitor { + type Result = bool; + + type DefaultBehavior = impl DefaultBehavior; fn default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a | b) } fn visit_batch_seq_scan(&mut self, batch_seq_scan: &BatchSeqScan) -> bool { - batch_seq_scan.logical().is_sys_table + batch_seq_scan.core().is_sys_table } fn visit_logical_scan(&mut self, logical_scan: &LogicalScan) -> bool { @@ -43,6 +45,6 @@ impl PlanVisitor for SysTableVisitor { } fn visit_stream_table_scan(&mut self, stream_table_scan: &StreamTableScan) -> bool { - stream_table_scan.logical().is_sys_table + stream_table_scan.core().is_sys_table } } diff --git a/src/frontend/src/optimizer/plan_visitor/temporal_join_validator.rs b/src/frontend/src/optimizer/plan_visitor/temporal_join_validator.rs index 7fb0b1b49e309..e80fcddb87324 100644 --- a/src/frontend/src/optimizer/plan_visitor/temporal_join_validator.rs +++ b/src/frontend/src/optimizer/plan_visitor/temporal_join_validator.rs @@ -29,19 +29,21 @@ impl TemporalJoinValidator { } } -impl PlanVisitor for TemporalJoinValidator { - type DefaultBehavior = impl DefaultBehavior; +impl PlanVisitor for TemporalJoinValidator { + type Result = bool; + + type DefaultBehavior = impl DefaultBehavior; fn 
default_behavior() -> Self::DefaultBehavior { Merge(|a, b| a | b) } fn visit_stream_table_scan(&mut self, stream_table_scan: &StreamTableScan) -> bool { - stream_table_scan.logical().for_system_time_as_of_proctime + stream_table_scan.core().for_system_time_as_of_proctime } fn visit_batch_seq_scan(&mut self, batch_seq_scan: &BatchSeqScan) -> bool { - batch_seq_scan.logical().for_system_time_as_of_proctime + batch_seq_scan.core().for_system_time_as_of_proctime } fn visit_logical_scan(&mut self, logical_scan: &LogicalScan) -> bool { diff --git a/src/frontend/src/optimizer/property/distribution.rs b/src/frontend/src/optimizer/property/distribution.rs index c4b09bd910c5f..2df1d7ae00bc3 100644 --- a/src/frontend/src/optimizer/property/distribution.rs +++ b/src/frontend/src/optimizer/property/distribution.rs @@ -59,7 +59,6 @@ use risingwave_pb::batch_plan::ExchangeInfo; use super::super::plan_node::*; use crate::catalog::catalog_service::CatalogReader; use crate::catalog::FragmentId; -use crate::optimizer::plan_node::stream::StreamPlanRef; use crate::optimizer::property::Order; use crate::optimizer::PlanRef; use crate::scheduler::worker_node_manager::WorkerNodeSelector; @@ -296,10 +295,12 @@ impl RequiredDist { pub fn enforce_if_not_satisfies( &self, - plan: PlanRef, + mut plan: PlanRef, required_order: &Order, ) -> Result { - let plan = required_order.enforce_if_not_satisfies(plan)?; + if let Convention::Batch = plan.convention() { + plan = required_order.enforce_if_not_satisfies(plan)?; + } if !plan.distribution().satisfies(self) { Ok(self.enforce(plan, required_order)) } else { @@ -314,23 +315,6 @@ impl RequiredDist { } } - #[allow(dead_code)] - pub fn enforce_stream_if_not_satisfies( - &self, - plan: stream::PlanRef, - ) -> Result { - if !plan.distribution().satisfies(self) { - // FIXME(st1page); - Ok(stream::Exchange { - dist: self.to_dist(), - input: plan, - } - .into()) - } else { - Ok(plan) - } - } - /// check if the distribution satisfies other required 
distribution pub fn satisfies(&self, required: &RequiredDist) -> bool { match self { @@ -347,7 +331,7 @@ impl RequiredDist { } } - fn enforce(&self, plan: PlanRef, required_order: &Order) -> PlanRef { + pub fn enforce(&self, plan: PlanRef, required_order: &Order) -> PlanRef { let dist = self.to_dist(); match plan.convention() { Convention::Batch => BatchExchange::new(plan, required_order.clone(), dist).into(), diff --git a/src/frontend/src/optimizer/property/order.rs b/src/frontend/src/optimizer/property/order.rs index a70bffb13a8ba..19ad7586e1c11 100644 --- a/src/frontend/src/optimizer/property/order.rs +++ b/src/frontend/src/optimizer/property/order.rs @@ -92,7 +92,7 @@ impl Order { } } - pub fn enforce(&self, plan: PlanRef) -> PlanRef { + fn enforce(&self, plan: PlanRef) -> PlanRef { assert_eq!(plan.convention(), Convention::Batch); BatchSort::new(plan, self.clone()).into() } diff --git a/src/frontend/src/optimizer/rule/agg_call_merge_rule.rs b/src/frontend/src/optimizer/rule/agg_call_merge_rule.rs new file mode 100644 index 0000000000000..2a8a22bd0fcc7 --- /dev/null +++ b/src/frontend/src/optimizer/rule/agg_call_merge_rule.rs @@ -0,0 +1,57 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{BoxedRule, Rule}; +use crate::optimizer::plan_node::generic::Agg; +use crate::optimizer::plan_node::{LogicalProject, PlanTreeNodeUnary}; +use crate::PlanRef; + +/// Merges duplicated aggregate function calls in `LogicalAgg`, and project them back to the desired schema. +pub struct AggCallMergeRule {} + +impl Rule for AggCallMergeRule { + fn apply(&self, plan: PlanRef) -> Option { + let Some(agg) = plan.as_logical_agg() else { + return None; + }; + + let calls = agg.agg_calls(); + let mut new_calls = Vec::with_capacity(calls.len()); + let mut out_fields = (0..agg.group_key().len()).collect::>(); + out_fields.extend(calls.iter().map(|call| { + let pos = new_calls.iter().position(|c| c == call).unwrap_or_else(|| { + let pos = new_calls.len(); + new_calls.push(call.clone()); + pos + }); + agg.group_key().len() + pos + })); + + if calls.len() == new_calls.len() { + // no change + None + } else { + let new_agg = Agg::new(new_calls, agg.group_key().clone(), agg.input()) + .with_enable_two_phase(agg.core().two_phase_agg_enabled()) + .into(); + Some(LogicalProject::with_out_col_idx(new_agg, out_fields.into_iter()).into()) + } + } +} + +impl AggCallMergeRule { + pub fn create() -> BoxedRule { + Box::new(Self {}) + } +} diff --git a/src/frontend/src/optimizer/rule/agg_group_by_simplify_rule.rs b/src/frontend/src/optimizer/rule/agg_group_by_simplify_rule.rs new file mode 100644 index 0000000000000..3e22348e27b49 --- /dev/null +++ b/src/frontend/src/optimizer/rule/agg_group_by_simplify_rule.rs @@ -0,0 +1,91 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_expr::aggregate::AggKind; + +use super::super::plan_node::*; +use super::{BoxedRule, Rule}; +use crate::expr::InputRef; +use crate::optimizer::plan_node::generic::{Agg, GenericPlanRef}; +use crate::utils::{Condition, IndexSet}; + +/// Use functional dependencies to simplify aggregation's group by +/// Before: +/// group by = [a, b, c], where b -> [a, c] +/// After +/// group by b, `first_value`(a), `first_value`(c), +pub struct AggGroupBySimplifyRule {} +impl Rule for AggGroupBySimplifyRule { + fn apply(&self, plan: PlanRef) -> Option { + let agg: &LogicalAgg = plan.as_logical_agg()?; + let (agg_calls, group_key, grouping_sets, agg_input, _two_phase) = agg.clone().decompose(); + if !grouping_sets.is_empty() { + return None; + } + let functional_dependency = agg_input.functional_dependency(); + let group_key = group_key.to_vec(); + if !functional_dependency.is_key(&group_key) { + return None; + } + let minimized_group_key = functional_dependency.minimize_key(&group_key); + if minimized_group_key.len() < group_key.len() { + let new_group_key = IndexSet::from(minimized_group_key); + let new_group_key_len = new_group_key.len(); + let mut new_agg_calls = vec![]; + for &i in &group_key { + if !new_group_key.contains(i) { + let data_type = agg_input.schema().fields[i].data_type(); + new_agg_calls.push(PlanAggCall { + agg_kind: AggKind::InternalLastSeenValue, + return_type: data_type.clone(), + inputs: vec![InputRef::new(i, data_type)], + distinct: false, + order_by: vec![], + filter: Condition::true_cond(), + direct_args: vec![], + }); + 
} + } + new_agg_calls.extend(agg_calls); + + // Use project to align schema type + let mut out_fields = vec![]; + let mut remained_group_key_offset = 0; + let mut removed_group_key_offset = new_group_key_len; + for &i in &group_key { + if new_group_key.contains(i) { + out_fields.push(remained_group_key_offset); + remained_group_key_offset += 1; + } else { + out_fields.push(removed_group_key_offset); + removed_group_key_offset += 1; + } + } + for i in group_key.len()..agg.base.schema().len() { + out_fields.push(i); + } + let new_agg = Agg::new(new_agg_calls, new_group_key, agg.input()); + + Some(LogicalProject::with_out_col_idx(new_agg.into(), out_fields.into_iter()).into()) + } else { + None + } + } +} + +impl AggGroupBySimplifyRule { + pub fn create() -> BoxedRule { + Box::new(AggGroupBySimplifyRule {}) + } +} diff --git a/src/frontend/src/optimizer/rule/agg_project_merge_rule.rs b/src/frontend/src/optimizer/rule/agg_project_merge_rule.rs index 3c6056e2db279..3f58b1af7c6d5 100644 --- a/src/frontend/src/optimizer/rule/agg_project_merge_rule.rs +++ b/src/frontend/src/optimizer/rule/agg_project_merge_rule.rs @@ -16,7 +16,6 @@ use itertools::Itertools; use super::super::plan_node::*; use super::{BoxedRule, Rule}; -use crate::optimizer::plan_node::generic::Agg; use crate::utils::IndexSet; /// Merge [`LogicalAgg`] <- [`LogicalProject`] to [`LogicalAgg`]. 
@@ -24,31 +23,33 @@ pub struct AggProjectMergeRule {} impl Rule for AggProjectMergeRule { fn apply(&self, plan: PlanRef) -> Option { let agg = plan.as_logical_agg()?; - let (mut agg_calls, agg_group_keys, grouping_sets, input) = agg.clone().decompose(); - assert!(grouping_sets.is_empty()); - let proj = input.as_logical_project()?; - + let agg = agg.core().clone(); + assert!(agg.grouping_sets.is_empty()); + let old_input = agg.input.clone(); + let proj = old_input.as_logical_project()?; // only apply when the input proj is all input-ref if !proj.is_all_inputref() { return None; } - let proj_o2i = proj.o2i_col_mapping(); - let new_input = proj.input(); - - // modify agg calls according to projection - agg_calls - .iter_mut() - .for_each(|x| x.rewrite_input_index(proj_o2i.clone())); // modify group key according to projection - let new_agg_group_keys_in_vec = agg_group_keys + let new_agg_group_keys_in_vec = agg + .group_key .indices() .map(|x| proj_o2i.map(x)) .collect_vec(); - let new_agg_group_keys = IndexSet::from_iter(new_agg_group_keys_in_vec.clone()); + let mut agg = agg; + agg.input = proj.input(); + // modify agg calls according to projection + agg.agg_calls + .iter_mut() + .for_each(|x| x.rewrite_input_index(proj_o2i.clone())); + agg.group_key = new_agg_group_keys.clone(); + agg.input = proj.input(); + if new_agg_group_keys.to_vec() != new_agg_group_keys_in_vec { // Need a project let new_agg_group_keys_cardinality = new_agg_group_keys.len(); @@ -57,17 +58,11 @@ impl Rule for AggProjectMergeRule { .map(|x| new_agg_group_keys.indices().position(|y| y == x).unwrap()) .chain( new_agg_group_keys_cardinality - ..new_agg_group_keys_cardinality + agg_calls.len(), + ..new_agg_group_keys_cardinality + agg.agg_calls.len(), ); - Some( - LogicalProject::with_out_col_idx( - Agg::new(agg_calls, new_agg_group_keys.clone(), new_input).into(), - out_col_idx, - ) - .into(), - ) + Some(LogicalProject::with_out_col_idx(agg.into(), out_col_idx).into()) } else { - 
Some(Agg::new(agg_calls, new_agg_group_keys, new_input).into()) + Some(agg.into()) } } } diff --git a/src/frontend/src/optimizer/rule/always_false_filter_rule.rs b/src/frontend/src/optimizer/rule/always_false_filter_rule.rs index 02165232372e4..eeba7d9f3be3b 100644 --- a/src/frontend/src/optimizer/rule/always_false_filter_rule.rs +++ b/src/frontend/src/optimizer/rule/always_false_filter_rule.rs @@ -15,6 +15,7 @@ use risingwave_common::types::ScalarImpl; use super::Rule; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{LogicalFilter, LogicalValues}; use crate::PlanRef; diff --git a/src/frontend/src/optimizer/rule/apply_agg_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_agg_transpose_rule.rs index 8781ca58b5ae8..3522473f47839 100644 --- a/src/frontend/src/optimizer/rule/apply_agg_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_agg_transpose_rule.rs @@ -13,7 +13,7 @@ // limitations under the License. use risingwave_common::types::DataType; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; use risingwave_pb::plan_common::JoinType; use super::{ApplyOffsetRewriter, BoxedRule, Rule}; @@ -52,7 +52,8 @@ impl Rule for ApplyAggTransposeRule { apply.clone().decompose(); assert_eq!(join_type, JoinType::Inner); let agg: &LogicalAgg = right.as_logical_agg()?; - let (mut agg_calls, agg_group_key, grouping_sets, agg_input) = agg.clone().decompose(); + let (mut agg_calls, agg_group_key, grouping_sets, agg_input, enable_two_phase) = + agg.clone().decompose(); assert!(grouping_sets.is_empty()); let is_scalar_agg = agg_group_key.is_empty(); let apply_left_len = left.schema().len(); @@ -147,7 +148,9 @@ impl Rule for ApplyAggTransposeRule { } let mut group_keys: IndexSet = (0..apply_left_len).collect(); group_keys.extend(agg_group_key.indices().map(|key| key + apply_left_len)); - Agg::new(agg_calls, group_keys, node).into() + Agg::new(agg_calls, group_keys, node) + 
.with_enable_two_phase(enable_two_phase) + .into() }; let filter = LogicalFilter::create(group_agg, on); diff --git a/src/frontend/src/optimizer/rule/apply_hop_window_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_hop_window_transpose_rule.rs new file mode 100644 index 0000000000000..bf332fee28417 --- /dev/null +++ b/src/frontend/src/optimizer/rule/apply_hop_window_transpose_rule.rs @@ -0,0 +1,93 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_pb::plan_common::JoinType; + +use super::{BoxedRule, Rule}; +use crate::optimizer::plan_node::{LogicalApply, LogicalFilter, LogicalHopWindow}; +use crate::optimizer::PlanRef; +use crate::utils::Condition; + +/// Transpose `LogicalApply` and `LogicalHopWindow`. 
+/// +/// Before: +/// +/// ```text +/// LogicalApply +/// / \ +/// Domain LogicalHopWindow +/// | +/// Input +/// ``` +/// +/// After: +/// +/// ```text +/// LogicalHopWindow +/// | +/// LogicalApply +/// / \ +/// Domain Input +/// ``` +pub struct ApplyHopWindowTransposeRule {} +impl Rule for ApplyHopWindowTransposeRule { + fn apply(&self, plan: PlanRef) -> Option { + let apply: &LogicalApply = plan.as_logical_apply()?; + let (left, right, on, join_type, correlated_id, correlated_indices, max_one_row) = + apply.clone().decompose(); + let hop_window: &LogicalHopWindow = right.as_logical_hop_window()?; + assert_eq!(join_type, JoinType::Inner); + + if !hop_window.output_indices_are_trivial() { + return None; + } + + let (hop_window_input, time_col, window_slide, window_size, window_offset, _output_indices) = + hop_window.clone().into_parts(); + + let apply_left_len = left.schema().len() as isize; + + if max_one_row { + return None; + } + + let new_apply = LogicalApply::new( + left, + hop_window_input, + JoinType::Inner, + Condition::true_cond(), + correlated_id, + correlated_indices, + false, + ) + .into(); + + let new_hop_window = LogicalHopWindow::create( + new_apply, + time_col.clone_with_offset(apply_left_len), + window_slide, + window_size, + window_offset, + ); + + let filter = LogicalFilter::create(new_hop_window, on); + Some(filter) + } +} + +impl ApplyHopWindowTransposeRule { + pub fn create() -> BoxedRule { + Box::new(ApplyHopWindowTransposeRule {}) + } +} diff --git a/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs index 089a66f0ad08b..7ac121692c81d 100644 --- a/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs @@ -23,6 +23,7 @@ use crate::expr::{ CorrelatedId, CorrelatedInputRef, Expr, ExprImpl, ExprRewriter, ExprType, FunctionCall, InputRef, }; +use crate::optimizer::plan_node::generic::GenericPlanRef; 
use crate::optimizer::plan_node::{LogicalApply, LogicalFilter, LogicalJoin, PlanTreeNodeBinary}; use crate::optimizer::plan_visitor::{ExprCorrelatedIdFinder, PlanCorrelatedIdFinder}; use crate::optimizer::rule::apply_offset_rewriter::ApplyCorrelatedIndicesConverter; @@ -122,7 +123,7 @@ impl Rule for ApplyJoinTransposeRule { } assert!( - join.output_indices_is_trivial(), + join.output_indices_are_trivial(), "ApplyJoinTransposeRule requires the join containing no output indices, so make sure ProjectJoinSeparateRule is always applied before this rule" ); diff --git a/src/frontend/src/optimizer/rule/cross_join_eliminate_rule.rs b/src/frontend/src/optimizer/rule/cross_join_eliminate_rule.rs index 336ec74fa9094..ddc8ff3efc2ba 100644 --- a/src/frontend/src/optimizer/rule/cross_join_eliminate_rule.rs +++ b/src/frontend/src/optimizer/rule/cross_join_eliminate_rule.rs @@ -44,7 +44,7 @@ impl Rule for CrossJoinEliminateRule { && join_type == JoinType::Inner && values.rows().len() == 1 // one row && values.rows()[0].is_empty() // no columns - && join.output_indices_is_trivial() + && join.output_indices_are_trivial() { Some(left) } else { diff --git a/src/frontend/src/optimizer/rule/distinct_agg_rule.rs b/src/frontend/src/optimizer/rule/distinct_agg_rule.rs index b7f9a5f902109..3822651c41f65 100644 --- a/src/frontend/src/optimizer/rule/distinct_agg_rule.rs +++ b/src/frontend/src/optimizer/rule/distinct_agg_rule.rs @@ -18,7 +18,7 @@ use std::mem; use fixedbitset::FixedBitSet; use itertools::Itertools; use risingwave_common::types::DataType; -use risingwave_expr::agg::{agg_kinds, AggKind}; +use risingwave_expr::aggregate::{agg_kinds, AggKind}; use super::{BoxedRule, Rule}; use crate::expr::{CollectInputRef, ExprType, FunctionCall, InputRef, Literal}; @@ -35,7 +35,8 @@ pub struct DistinctAggRule { impl Rule for DistinctAggRule { fn apply(&self, plan: PlanRef) -> Option { let agg: &LogicalAgg = plan.as_logical_agg()?; - let (mut agg_calls, mut agg_group_keys, grouping_sets, input) = 
agg.clone().decompose(); + let (mut agg_calls, mut agg_group_keys, grouping_sets, input, enable_two_phase) = + agg.clone().decompose(); assert!(grouping_sets.is_empty()); if agg_calls.iter().all(|c| !c.distinct) { @@ -84,6 +85,7 @@ impl Rule for DistinctAggRule { agg_calls, flag_values, has_expand, + enable_two_phase, )) } } @@ -237,7 +239,7 @@ impl DistinctAggRule { // append `flag`. group_keys.insert(project.schema().len() - 1); } - Agg::new(agg_calls, group_keys, project) + Agg::new(agg_calls, group_keys, project).with_enable_two_phase(false) } fn build_final_agg( @@ -246,6 +248,7 @@ impl DistinctAggRule { mut agg_calls: Vec, flag_values: Vec, has_expand: bool, + enable_two_phase: bool, ) -> PlanRef { // the index of `flag` in schema of the middle `LogicalAgg`, if has `Expand`. let pos_of_flag = mid_agg.group_key.len() - 1; @@ -322,6 +325,8 @@ impl DistinctAggRule { } }); - Agg::new(agg_calls, final_agg_group_keys, mid_agg.into()).into() + Agg::new(agg_calls, final_agg_group_keys, mid_agg.into()) + .with_enable_two_phase(enable_two_phase) + .into() } } diff --git a/src/frontend/src/optimizer/rule/expand_to_project_rule.rs b/src/frontend/src/optimizer/rule/expand_to_project_rule.rs index 1ed1da0037aba..01a39042efd98 100644 --- a/src/frontend/src/optimizer/rule/expand_to_project_rule.rs +++ b/src/frontend/src/optimizer/rule/expand_to_project_rule.rs @@ -36,7 +36,7 @@ impl Rule for ExpandToProjectRule { let column_subset = column_subsets.get(0).unwrap(); // if `column_subsets` len equals 1, convert it into a project - let mut exprs = Vec::with_capacity(expand.base.schema.len()); + let mut exprs = Vec::with_capacity(expand.base.schema().len()); // Add original input column first for i in 0..input.schema().len() { exprs.push(ExprImpl::InputRef( diff --git a/src/frontend/src/optimizer/rule/grouping_sets_to_expand_rule.rs b/src/frontend/src/optimizer/rule/grouping_sets_to_expand_rule.rs index 2073743c90c17..344241441f81c 100644 --- 
a/src/frontend/src/optimizer/rule/grouping_sets_to_expand_rule.rs +++ b/src/frontend/src/optimizer/rule/grouping_sets_to_expand_rule.rs @@ -16,7 +16,7 @@ use fixedbitset::FixedBitSet; use itertools::Itertools; use risingwave_common::types::DataType; use risingwave_common::util::column_index_mapping::ColIndexMapping; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; use super::super::plan_node::*; use super::{BoxedRule, Rule}; @@ -75,7 +75,7 @@ impl Rule for GroupingSetsToExpandRule { return None; } let agg = Self::prune_column_for_agg(agg); - let (agg_calls, mut group_keys, grouping_sets, input) = agg.decompose(); + let (agg_calls, mut group_keys, grouping_sets, input, enable_two_phase) = agg.decompose(); let flag_col_idx = group_keys.len(); let input_schema_len = input.schema().len(); @@ -159,7 +159,8 @@ impl Rule for GroupingSetsToExpandRule { } } - let new_agg = Agg::new(new_agg_calls, group_keys, expand); + let new_agg = + Agg::new(new_agg_calls, group_keys, expand).with_enable_two_phase(enable_two_phase); let project_exprs = (0..flag_col_idx) .map(|i| { ExprImpl::InputRef( diff --git a/src/frontend/src/optimizer/rule/index_delta_join_rule.rs b/src/frontend/src/optimizer/rule/index_delta_join_rule.rs index e9147ce0ec882..30435d635568b 100644 --- a/src/frontend/src/optimizer/rule/index_delta_join_rule.rs +++ b/src/frontend/src/optimizer/rule/index_delta_join_rule.rs @@ -54,7 +54,7 @@ impl Rule for IndexDeltaJoinRule { table_scan: &StreamTableScan, chain_type: ChainType, ) -> Option { - for index in &table_scan.logical().indexes { + for index in &table_scan.core().indexes { // Only full covering index can be used in delta join if !index.full_covering() { continue; @@ -68,7 +68,7 @@ impl Rule for IndexDeltaJoinRule { // keys here. 
let join_indices_ref_to_index_table = join_indices .iter() - .map(|&i| table_scan.logical().output_col_idx[i]) + .map(|&i| table_scan.core().output_col_idx[i]) .map(|x| *p2s_mapping.get(&x).unwrap()) .collect_vec(); @@ -103,7 +103,7 @@ impl Rule for IndexDeltaJoinRule { } // Primary table is also an index. - let primary_table = table_scan.logical(); + let primary_table = table_scan.core(); if let Some(primary_table_distribution_key) = primary_table.distribution_key() && primary_table_distribution_key == join_indices { @@ -123,7 +123,7 @@ impl Rule for IndexDeltaJoinRule { if chain_type != table_scan.chain_type() { Some( StreamTableScan::new_with_chain_type( - table_scan.logical().clone(), + table_scan.core().clone(), chain_type, ) .into(), diff --git a/src/frontend/src/optimizer/rule/index_selection_rule.rs b/src/frontend/src/optimizer/rule/index_selection_rule.rs index c16cd7e31bf28..323cc59ef3558 100644 --- a/src/frontend/src/optimizer/rule/index_selection_rule.rs +++ b/src/frontend/src/optimizer/rule/index_selection_rule.rs @@ -66,6 +66,7 @@ use crate::expr::{ FunctionCall, InputRef, }; use crate::optimizer::optimizer_context::OptimizerContextRef; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{ generic, ColumnPruningContext, LogicalJoin, LogicalScan, LogicalUnion, PlanTreeNode, PlanTreeNodeBinary, PredicatePushdown, PredicatePushdownContext, @@ -917,7 +918,9 @@ impl IndexCost { } } -impl ExprVisitor for TableScanIoEstimator<'_> { +impl ExprVisitor for TableScanIoEstimator<'_> { + type Result = IndexCost; + fn visit_function_call(&mut self, func_call: &FunctionCall) -> IndexCost { match func_call.func_type() { ExprType::Or => func_call @@ -944,7 +947,9 @@ struct ExprInputRefFinder { pub input_ref_index_set: HashSet, } -impl ExprVisitor<()> for ExprInputRefFinder { +impl ExprVisitor for ExprInputRefFinder { + type Result = (); + fn merge(_: (), _: ()) {} fn visit_input_ref(&mut self, input_ref: &InputRef) { diff --git 
a/src/frontend/src/optimizer/rule/left_deep_tree_join_ordering_rule.rs b/src/frontend/src/optimizer/rule/left_deep_tree_join_ordering_rule.rs index dcbb6f7b015ee..bd2db0ac67cca 100644 --- a/src/frontend/src/optimizer/rule/left_deep_tree_join_ordering_rule.rs +++ b/src/frontend/src/optimizer/rule/left_deep_tree_join_ordering_rule.rs @@ -47,6 +47,7 @@ mod tests { use super::*; use crate::expr::{ExprImpl, FunctionCall, InputRef}; use crate::optimizer::optimizer_context::OptimizerContext; + use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::utils::Condition; #[tokio::test] diff --git a/src/frontend/src/optimizer/rule/merge_multijoin_rule.rs b/src/frontend/src/optimizer/rule/merge_multijoin_rule.rs index c496a906400ae..8682db8491a1d 100644 --- a/src/frontend/src/optimizer/rule/merge_multijoin_rule.rs +++ b/src/frontend/src/optimizer/rule/merge_multijoin_rule.rs @@ -46,6 +46,7 @@ mod tests { use super::*; use crate::expr::{ExprImpl, FunctionCall, InputRef}; use crate::optimizer::optimizer_context::OptimizerContext; + use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::utils::Condition; #[tokio::test] diff --git a/src/frontend/src/optimizer/rule/min_max_on_index_rule.rs b/src/frontend/src/optimizer/rule/min_max_on_index_rule.rs index d2fdd4bdfe32e..c32ae40531cd0 100644 --- a/src/frontend/src/optimizer/rule/min_max_on_index_rule.rs +++ b/src/frontend/src/optimizer/rule/min_max_on_index_rule.rs @@ -23,11 +23,11 @@ use std::vec; use itertools::Itertools; use risingwave_common::types::DataType; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; use super::{BoxedRule, Rule}; use crate::expr::{ExprImpl, ExprType, FunctionCall, InputRef}; -use crate::optimizer::plan_node::generic::Agg; +use crate::optimizer::plan_node::generic::{Agg, GenericPlanRef}; use crate::optimizer::plan_node::{ LogicalAgg, LogicalFilter, LogicalScan, LogicalTopN, PlanAggCall, 
PlanTreeNodeUnary, }; diff --git a/src/frontend/src/optimizer/rule/mod.rs b/src/frontend/src/optimizer/rule/mod.rs index 6542ac8e163bb..7867bb1bb54f9 100644 --- a/src/frontend/src/optimizer/rule/mod.rs +++ b/src/frontend/src/optimizer/rule/mod.rs @@ -144,6 +144,12 @@ mod apply_expand_transpose_rule; pub use apply_expand_transpose_rule::*; mod expand_to_project_rule; pub use expand_to_project_rule::*; +mod agg_group_by_simplify_rule; +pub use agg_group_by_simplify_rule::*; +mod apply_hop_window_transpose_rule; +pub use apply_hop_window_transpose_rule::*; +mod agg_call_merge_rule; +pub use agg_call_merge_rule::*; #[macro_export] macro_rules! for_all_rules { @@ -206,6 +212,9 @@ macro_rules! for_all_rules { , { ApplyOverWindowTransposeRule } , { ApplyExpandTransposeRule } , { ExpandToProjectRule } + , { AggGroupBySimplifyRule } + , { ApplyHopWindowTransposeRule } + , { AggCallMergeRule } } }; } diff --git a/src/frontend/src/optimizer/rule/over_window_to_agg_and_join_rule.rs b/src/frontend/src/optimizer/rule/over_window_to_agg_and_join_rule.rs index b9587650f8726..dbf3e9809675c 100644 --- a/src/frontend/src/optimizer/rule/over_window_to_agg_and_join_rule.rs +++ b/src/frontend/src/optimizer/rule/over_window_to_agg_and_join_rule.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use itertools::Itertools; -use risingwave_expr::function::window::WindowFuncKind; +use risingwave_expr::window_function::WindowFuncKind; use risingwave_pb::expr::expr_node::Type; use risingwave_pb::plan_common::JoinType; diff --git a/src/frontend/src/optimizer/rule/over_window_to_topn_rule.rs b/src/frontend/src/optimizer/rule/over_window_to_topn_rule.rs index 297522a41c8c9..93637d3ba8193 100644 --- a/src/frontend/src/optimizer/rule/over_window_to_topn_rule.rs +++ b/src/frontend/src/optimizer/rule/over_window_to_topn_rule.rs @@ -14,10 +14,11 @@ use fixedbitset::FixedBitSet; use risingwave_common::types::DataType; -use risingwave_expr::function::window::WindowFuncKind; +use risingwave_expr::window_function::WindowFuncKind; use super::Rule; use crate::expr::{collect_input_refs, ExprImpl, ExprType}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{LogicalFilter, LogicalTopN, PlanTreeNodeUnary}; use crate::optimizer::property::Order; use crate::planner::LIMIT_ALL_COUNT; diff --git a/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_rule.rs b/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_rule.rs index dc5f9c2bc9aba..f34146ba80050 100644 --- a/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_rule.rs +++ b/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_rule.rs @@ -18,6 +18,7 @@ use risingwave_common::util::column_index_mapping::ColIndexMapping; use super::super::plan_node::*; use super::{BoxedRule, Rule}; use crate::expr::{CorrelatedId, CorrelatedInputRef, Expr, ExprImpl, ExprRewriter, InputRef}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_visitor::{PlanCorrelatedIdFinder, PlanVisitor}; use crate::optimizer::PlanRef; use crate::utils::Condition; diff --git a/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs b/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs index 394d569050c27..facad4a8da07c 100644 --- 
a/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs +++ b/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs @@ -48,7 +48,9 @@ impl Rule for RewriteLikeExprRule { struct HasLikeExprVisitor {} -impl ExprVisitor for HasLikeExprVisitor { +impl ExprVisitor for HasLikeExprVisitor { + type Result = bool; + fn merge(a: bool, b: bool) -> bool { a | b } diff --git a/src/frontend/src/optimizer/rule/table_function_to_project_set_rule.rs b/src/frontend/src/optimizer/rule/table_function_to_project_set_rule.rs index 095e08664e1c4..f85ffc2318459 100644 --- a/src/frontend/src/optimizer/rule/table_function_to_project_set_rule.rs +++ b/src/frontend/src/optimizer/rule/table_function_to_project_set_rule.rs @@ -18,12 +18,13 @@ use risingwave_common::types::DataType; use super::{BoxedRule, Rule}; use crate::expr::{Expr, ExprImpl, ExprType, FunctionCall, InputRef}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{ - LogicalProject, LogicalProjectSet, LogicalTableFunction, LogicalValues, + LogicalProject, LogicalProjectSet, LogicalTableFunction, LogicalValues, PlanTreeNodeUnary, }; use crate::optimizer::PlanRef; -/// Transform a table function into a project set +/// Transform a `TableFunction` (used in FROM clause) into a `ProjectSet` so that it can be unnested later if it contains `CorrelatedInputRef`. /// /// Before: /// @@ -51,14 +52,14 @@ impl Rule for TableFunctionToProjectSetRule { let logical_values = LogicalValues::create( vec![vec![]], Schema::new(vec![]), - logical_table_function.base.ctx.clone(), + logical_table_function.base.ctx().clone(), ); let logical_project_set = LogicalProjectSet::create(logical_values, vec![table_function]); - // We need a project to align schema type because `LogicalProjectSet` has a hidden column - // `projected_row_id` and table function could return multiple columns, while project set - // return only one column with struct type. + // We need a project to align schema type because + // 1. 
`LogicalProjectSet` has a hidden column `projected_row_id` (0-th col) + // 2. When the function returns a struct type, TableFunction will return flatten it into multiple columns, while ProjectSet still returns a single column. let table_function_col_idx = 1; - if let DataType::Struct(st) = table_function_return_type.clone() { + let logical_project = if let DataType::Struct(st) = table_function_return_type.clone() { let exprs = st .types() .enumerate() @@ -66,13 +67,11 @@ impl Rule for TableFunctionToProjectSetRule { let field_access = FunctionCall::new_unchecked( ExprType::Field, vec![ - ExprImpl::InputRef( - InputRef::new( - table_function_col_idx, - table_function_return_type.clone(), - ) - .into(), - ), + InputRef::new( + table_function_col_idx, + table_function_return_type.clone(), + ) + .into(), ExprImpl::literal_int(i as i32), ], data_type.clone(), @@ -80,13 +79,27 @@ impl Rule for TableFunctionToProjectSetRule { ExprImpl::FunctionCall(field_access.into()) }) .collect_vec(); - let logical_project = LogicalProject::new(logical_project_set, exprs); - Some(logical_project.into()) + LogicalProject::new(logical_project_set, exprs) } else { - let logical_project = LogicalProject::with_out_col_idx( + LogicalProject::with_out_col_idx( logical_project_set, std::iter::once(table_function_col_idx), - ); + ) + }; + + if logical_table_function.with_ordinality { + let projected_row_id = InputRef::new(0, DataType::Int64).into(); + let ordinality = FunctionCall::new( + ExprType::Add, + vec![projected_row_id, ExprImpl::literal_bigint(1)], + ) + .unwrap() // i64 + i64 is ok + .into(); + let mut exprs = logical_project.exprs().clone(); + exprs.push(ordinality); + let logical_project = LogicalProject::new(logical_project.input(), exprs); + Some(logical_project.into()) + } else { Some(logical_project.into()) } } diff --git a/src/frontend/src/optimizer/rule/trivial_project_to_values_rule.rs b/src/frontend/src/optimizer/rule/trivial_project_to_values_rule.rs index 
9759739490fe6..a13bef3baa9d9 100644 --- a/src/frontend/src/optimizer/rule/trivial_project_to_values_rule.rs +++ b/src/frontend/src/optimizer/rule/trivial_project_to_values_rule.rs @@ -13,6 +13,7 @@ // limitations under the License. use super::{BoxedRule, Rule}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::{LogicalValues, PlanTreeNodeUnary}; use crate::optimizer::plan_visitor::{LogicalCardinalityExt, SideEffectVisitor}; use crate::optimizer::{PlanRef, PlanVisitor}; diff --git a/src/frontend/src/optimizer/rule/union_input_values_merge_rule.rs b/src/frontend/src/optimizer/rule/union_input_values_merge_rule.rs index 8119b8847b600..7b83c017ab781 100644 --- a/src/frontend/src/optimizer/rule/union_input_values_merge_rule.rs +++ b/src/frontend/src/optimizer/rule/union_input_values_merge_rule.rs @@ -13,6 +13,7 @@ // limitations under the License. use super::{BoxedRule, Rule}; +use crate::optimizer::plan_node::generic::GenericPlanRef; use crate::optimizer::plan_node::LogicalValues; use crate::optimizer::{PlanRef, PlanTreeNode}; diff --git a/src/frontend/src/optimizer/rule/union_to_distinct_rule.rs b/src/frontend/src/optimizer/rule/union_to_distinct_rule.rs index bd4764fe04f1e..f1d203fba1350 100644 --- a/src/frontend/src/optimizer/rule/union_to_distinct_rule.rs +++ b/src/frontend/src/optimizer/rule/union_to_distinct_rule.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use super::{BoxedRule, Rule}; -use crate::optimizer::plan_node::generic::Agg; +use crate::optimizer::plan_node::generic::{Agg, GenericPlanRef}; use crate::optimizer::plan_node::{LogicalUnion, PlanTreeNode}; use crate::optimizer::PlanRef; @@ -24,7 +24,8 @@ impl Rule for UnionToDistinctRule { let union: &LogicalUnion = plan.as_logical_union()?; if !union.all() { let union_all = LogicalUnion::create(true, union.inputs().into_iter().collect()); - let distinct = Agg::new(vec![], (0..union.base.schema.len()).collect(), union_all); + let distinct = Agg::new(vec![], (0..union.base.schema().len()).collect(), union_all) + .with_enable_two_phase(false); Some(distinct.into()) } else { None diff --git a/src/frontend/src/planner/delete.rs b/src/frontend/src/planner/delete.rs index c6d779ed26603..00bb7fd59ae9a 100644 --- a/src/frontend/src/planner/delete.rs +++ b/src/frontend/src/planner/delete.rs @@ -17,7 +17,7 @@ use risingwave_common::error::Result; use super::Planner; use crate::binder::BoundDelete; -use crate::optimizer::plan_node::{generic, LogicalDelete, LogicalFilter, LogicalProject}; +use crate::optimizer::plan_node::{generic, LogicalDelete, LogicalProject}; use crate::optimizer::property::{Order, RequiredDist}; use crate::optimizer::{PlanRef, PlanRoot}; @@ -25,7 +25,7 @@ impl Planner { pub(super) fn plan_delete(&mut self, delete: BoundDelete) -> Result { let scan = self.plan_base_table(&delete.table)?; let input = if let Some(expr) = delete.selection { - LogicalFilter::create_with_expr(scan, expr) + self.plan_where(scan, expr)? 
} else { scan }; diff --git a/src/frontend/src/planner/relation.rs b/src/frontend/src/planner/relation.rs index db4bb0233f077..f4085f6ffa42e 100644 --- a/src/frontend/src/planner/relation.rs +++ b/src/frontend/src/planner/relation.rs @@ -46,7 +46,10 @@ impl Planner { Relation::Apply(join) => self.plan_apply(*join), Relation::WindowTableFunction(tf) => self.plan_window_table_function(*tf), Relation::Source(s) => self.plan_source(*s), - Relation::TableFunction(tf) => self.plan_table_function(tf), + Relation::TableFunction { + expr: tf, + with_ordinality, + } => self.plan_table_function(tf, with_ordinality), Relation::Watermark(tf) => self.plan_watermark(*tf), Relation::Share(share) => self.plan_share(*share), } @@ -150,16 +153,33 @@ impl Planner { } } - pub(super) fn plan_table_function(&mut self, table_function: ExprImpl) -> Result { + pub(super) fn plan_table_function( + &mut self, + table_function: ExprImpl, + with_ordinality: bool, + ) -> Result { // TODO: maybe we can unify LogicalTableFunction with LogicalValues match table_function { - ExprImpl::TableFunction(tf) => Ok(LogicalTableFunction::new(*tf, self.ctx()).into()), + ExprImpl::TableFunction(tf) => { + Ok(LogicalTableFunction::new(*tf, with_ordinality, self.ctx()).into()) + } expr => { - let schema = Schema { + let mut schema = Schema { // TODO: should be named fields: vec![Field::unnamed(expr.return_type())], }; - Ok(LogicalValues::create(vec![vec![expr]], schema, self.ctx())) + if with_ordinality { + schema + .fields + .push(Field::with_name(DataType::Int64, "ordinality")); + Ok(LogicalValues::create( + vec![vec![expr, ExprImpl::literal_bigint(1)]], + schema, + self.ctx(), + )) + } else { + Ok(LogicalValues::create(vec![vec![expr]], schema, self.ctx())) + } } } } diff --git a/src/frontend/src/planner/select.rs b/src/frontend/src/planner/select.rs index c21538534ac24..96b32680309df 100644 --- a/src/frontend/src/planner/select.rs +++ b/src/frontend/src/planner/select.rs @@ -216,7 +216,11 @@ impl Planner { 
/// `LeftSemi/LeftAnti` [`LogicalApply`] /// For other subqueries, we plan it as `LeftOuter` [`LogicalApply`] using /// [`Self::substitute_subqueries`]. - fn plan_where(&mut self, mut input: PlanRef, where_clause: ExprImpl) -> Result { + pub(super) fn plan_where( + &mut self, + mut input: PlanRef, + where_clause: ExprImpl, + ) -> Result { if !where_clause.has_subquery() { return Ok(LogicalFilter::create_with_expr(input, where_clause)); } diff --git a/src/frontend/src/planner/update.rs b/src/frontend/src/planner/update.rs index d04ffc4132391..f73f5354436fd 100644 --- a/src/frontend/src/planner/update.rs +++ b/src/frontend/src/planner/update.rs @@ -16,7 +16,6 @@ use fixedbitset::FixedBitSet; use itertools::Itertools; use risingwave_common::error::Result; -use super::select::LogicalFilter; use super::Planner; use crate::binder::BoundUpdate; use crate::optimizer::plan_node::{generic, LogicalProject, LogicalUpdate}; @@ -27,7 +26,7 @@ impl Planner { pub(super) fn plan_update(&mut self, update: BoundUpdate) -> Result { let scan = self.plan_base_table(&update.table)?; let input = if let Some(expr) = update.selection { - LogicalFilter::create_with_expr(scan, expr) + self.plan_where(scan, expr)? 
} else { scan }; diff --git a/src/frontend/src/scheduler/distributed/stage.rs b/src/frontend/src/scheduler/distributed/stage.rs index fde1bc7244368..7c3030370d56f 100644 --- a/src/frontend/src/scheduler/distributed/stage.rs +++ b/src/frontend/src/scheduler/distributed/stage.rs @@ -406,7 +406,7 @@ impl StageRunner { match status_res_inner { Ok(status) => { use risingwave_pb::task_service::task_info_response::TaskStatus as TaskStatusPb; - match TaskStatusPb::from_i32(status.task_status).unwrap() { + match TaskStatusPb::try_from(status.task_status).unwrap() { TaskStatusPb::Running => { running_task_cnt += 1; // The task running count should always less or equal than the diff --git a/src/frontend/src/scheduler/local.rs b/src/frontend/src/scheduler/local.rs index f3906ffbcc755..28cfa25b70bf1 100644 --- a/src/frontend/src/scheduler/local.rs +++ b/src/frontend/src/scheduler/local.rs @@ -52,7 +52,7 @@ use crate::scheduler::plan_fragmenter::{ExecutionPlanNode, Query, StageId}; use crate::scheduler::task_context::FrontendBatchTaskContext; use crate::scheduler::worker_node_manager::WorkerNodeSelector; use crate::scheduler::{ReadSnapshot, SchedulerError, SchedulerResult}; -use crate::session::{AuthContext, FrontendEnv}; +use crate::session::{AuthContext, FrontendEnv, SessionImpl}; pub type LocalQueryStream = ReceiverStream>; @@ -63,8 +63,7 @@ pub struct LocalQueryExecution { // The snapshot will be released when LocalQueryExecution is dropped. 
// TODO snapshot: ReadSnapshot, - auth_context: Arc, - shutdown_rx: ShutdownToken, + session: Arc, worker_node_manager: WorkerNodeSelector, } @@ -74,8 +73,7 @@ impl LocalQueryExecution { front_env: FrontendEnv, sql: S, snapshot: ReadSnapshot, - auth_context: Arc, - shutdown_rx: ShutdownToken, + session: Arc, ) -> Self { let sql = sql.into(); let worker_node_manager = WorkerNodeSelector::new( @@ -88,18 +86,24 @@ impl LocalQueryExecution { query, front_env, snapshot, - auth_context, - shutdown_rx, + session, worker_node_manager, } } + fn auth_context(&self) -> Arc { + self.session.auth_context() + } + + fn shutdown_rx(&self) -> ShutdownToken { + self.session.reset_cancel_query_flag() + } + #[try_stream(ok = DataChunk, error = RwError)] pub async fn run_inner(self) { debug!(%self.query.query_id, self.sql, "Starting to run query"); - let context = - FrontendBatchTaskContext::new(self.front_env.clone(), self.auth_context.clone()); + let context = FrontendBatchTaskContext::new(self.front_env.clone(), self.auth_context()); let task_id = TaskId { query_id: self.query.query_id.id.clone(), @@ -115,7 +119,7 @@ impl LocalQueryExecution { &task_id, context, self.snapshot.batch_query_epoch(), - self.shutdown_rx.clone(), + self.shutdown_rx().clone(), ); let executor = executor.build().await?; @@ -137,9 +141,14 @@ impl LocalQueryExecution { pub fn stream_rows(self) -> LocalQueryStream { let compute_runtime = self.front_env.compute_runtime(); let (sender, receiver) = mpsc::channel(10); - let shutdown_rx = self.shutdown_rx.clone(); + let shutdown_rx = self.shutdown_rx().clone(); + + let catalog_reader = self.front_env.catalog_reader().clone(); + let auth_context = self.session.auth_context().clone(); + let db_name = self.session.database().to_string(); + let search_path = self.session.config().get_search_path().clone(); - compute_runtime.spawn(async move { + let exec = async move { let mut data_stream = self.run().map(|r| r.map_err(|e| Box::new(e) as BoxedError)); while let Some(mut 
r) = data_stream.next().await { // append a query cancelled error if the query is cancelled. @@ -151,7 +160,18 @@ impl LocalQueryExecution { return; } } - }); + }; + + use crate::expr::function_impl::context::{ + AUTH_CONTEXT, CATALOG_READER, DB_NAME, SEARCH_PATH, + }; + + let exec = async move { CATALOG_READER::scope(catalog_reader, exec).await }; + let exec = async move { DB_NAME::scope(db_name, exec).await }; + let exec = async move { SEARCH_PATH::scope(search_path, exec).await }; + let exec = async move { AUTH_CONTEXT::scope(auth_context, exec).await }; + + compute_runtime.spawn(exec); ReceiverStream::new(receiver) } diff --git a/src/frontend/src/scheduler/plan_fragmenter.rs b/src/frontend/src/scheduler/plan_fragmenter.rs index ee40882c4cbf1..cb20103b3e76f 100644 --- a/src/frontend/src/scheduler/plan_fragmenter.rs +++ b/src/frontend/src/scheduler/plan_fragmenter.rs @@ -27,8 +27,9 @@ use risingwave_common::catalog::TableDesc; use risingwave_common::error::RwError; use risingwave_common::hash::{ParallelUnitId, ParallelUnitMapping, VirtualNode}; use risingwave_common::util::scan_range::ScanRange; +use risingwave_connector::source::kafka::KafkaSplitEnumerator; use risingwave_connector::source::{ - ConnectorProperties, SourceEnumeratorContext, SplitEnumeratorImpl, SplitImpl, + ConnectorProperties, SourceEnumeratorContext, SplitEnumerator, SplitImpl, }; use risingwave_pb::batch_plan::plan_node::NodeBody; use risingwave_pb::batch_plan::{ExchangeInfo, ScanRange as ScanRangeProto}; @@ -102,7 +103,7 @@ impl Serialize for ExecutionPlanNode { impl From for ExecutionPlanNode { fn from(plan_node: PlanRef) -> Self { Self { - plan_node_id: plan_node.plan_base().id, + plan_node_id: plan_node.plan_base().id(), plan_node_type: plan_node.node_type(), node: plan_node.to_batch_prost_body(), children: vec![], @@ -266,19 +267,17 @@ impl SourceScanInfo { unreachable!("Never call complete when SourceScanInfo is already complete") } }; - let mut enumerator = SplitEnumeratorImpl::create( 
- fetch_info.connector, - SourceEnumeratorContext::default().into(), - ) - .await?; - let kafka_enumerator = match enumerator { - SplitEnumeratorImpl::Kafka(ref mut kafka_enumerator) => kafka_enumerator, + let kafka_prop = match fetch_info.connector { + ConnectorProperties::Kafka(prop) => *prop, _ => { return Err(SchedulerError::Internal(anyhow!( "Unsupported to query directly from this source" ))) } }; + let mut kafka_enumerator = + KafkaSplitEnumerator::new(kafka_prop, SourceEnumeratorContext::default().into()) + .await?; let split_info = kafka_enumerator .list_splits_batch(fetch_info.timebound.0, fetch_info.timebound.1) .await? @@ -919,11 +918,11 @@ impl BatchPlanFragmenter { } if let Some(scan_node) = node.as_batch_seq_scan() { - let name = scan_node.logical().table_name.to_owned(); - let info = if scan_node.logical().is_sys_table { + let name = scan_node.core().table_name.to_owned(); + let info = if scan_node.core().is_sys_table { TableScanInfo::system_table(name) } else { - let table_desc = &*scan_node.logical().table_desc; + let table_desc = &*scan_node.core().table_desc; let table_catalog = self .catalog_reader .read_guard() @@ -952,11 +951,11 @@ impl BatchPlanFragmenter { return None; } if let Some(insert) = node.as_batch_insert() { - Some(insert.logical.table_id) + Some(insert.core.table_id) } else if let Some(update) = node.as_batch_update() { - Some(update.logical.table_id) + Some(update.core.table_id) } else if let Some(delete) = node.as_batch_delete() { - Some(delete.logical.table_id) + Some(delete.core.table_id) } else { node.inputs() .into_iter() diff --git a/src/frontend/src/session.rs b/src/frontend/src/session.rs index 9f9390cb629c2..67eac0df34d05 100644 --- a/src/frontend/src/session.rs +++ b/src/frontend/src/session.rs @@ -51,7 +51,7 @@ use risingwave_pb::health::health_server::HealthServer; use risingwave_pb::user::auth_info::EncryptionType; use risingwave_pb::user::grant_privilege::{Action, Object}; use 
risingwave_rpc_client::{ComputeClientPool, ComputeClientPoolRef, MetaClient}; -use risingwave_sqlparser::ast::{ObjectName, ShowObject, Statement}; +use risingwave_sqlparser::ast::{ObjectName, Statement}; use risingwave_sqlparser::parser::Parser; use thiserror::Error; use tokio::runtime::Builder; @@ -86,6 +86,7 @@ use crate::user::user_authentication::md5_hash_with_salt; use crate::user::user_manager::UserInfoManager; use crate::user::user_service::{UserInfoReader, UserInfoWriter, UserInfoWriterImpl}; use crate::user::UserId; +use crate::utils::infer_stmt_row_desc::infer_show_object; use crate::{FrontendOpts, PgResponseStream}; pub(crate) mod transaction; @@ -1090,25 +1091,7 @@ fn infer(bound: Option, stmt: Statement) -> Result match show_object { - ShowObject::Columns { table: _ } => Ok(vec![ - PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - PgFieldDescriptor::new( - "Type".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - ), - ]), - _ => Ok(vec![PgFieldDescriptor::new( - "Name".to_owned(), - DataType::Varchar.to_oid(), - DataType::Varchar.type_len(), - )]), - }, + } => Ok(infer_show_object(&show_object)), Statement::ShowCreateObject { .. 
} => Ok(vec![ PgFieldDescriptor::new( "Name".to_owned(), @@ -1160,6 +1143,11 @@ fn infer(bound: Option, stmt: Statement) -> Result Ok(vec![PgFieldDescriptor::new( "QUERY PLAN".to_owned(), diff --git a/src/frontend/src/test_utils.rs b/src/frontend/src/test_utils.rs index e934bed502f42..cf915ae35713d 100644 --- a/src/frontend/src/test_utils.rs +++ b/src/frontend/src/test_utils.rs @@ -35,7 +35,10 @@ use risingwave_pb::catalog::{ PbDatabase, PbFunction, PbIndex, PbSchema, PbSink, PbSource, PbTable, PbView, Table, }; use risingwave_pb::ddl_service::{create_connection_request, DdlProgress}; -use risingwave_pb::hummock::HummockSnapshot; +use risingwave_pb::hummock::write_limits::WriteLimit; +use risingwave_pb::hummock::{ + BranchedObject, CompactionGroupInfo, HummockSnapshot, HummockVersion, HummockVersionDelta, +}; use risingwave_pb::meta::cancel_creating_jobs_request::PbJobs; use risingwave_pb::meta::list_actor_states_response::ActorState; use risingwave_pb::meta::list_fragment_distribution_response::FragmentDistribution; @@ -263,6 +266,7 @@ impl CatalogWriter for MockCatalogWriter { async fn replace_table( &self, + _source: Option, table: PbTable, _graph: StreamFragmentGraph, _mapping: ColIndexMapping, @@ -769,6 +773,10 @@ impl FrontendMetaClient for MockFrontendMetaClient { }) } + async fn wait(&self) -> RpcResult<()> { + Ok(()) + } + async fn cancel_creating_jobs(&self, _infos: PbJobs) -> RpcResult> { Ok(vec![]) } @@ -823,6 +831,42 @@ impl FrontendMetaClient for MockFrontendMetaClient { async fn get_tables(&self, _table_ids: &[u32]) -> RpcResult> { Ok(HashMap::new()) } + + async fn list_hummock_pinned_versions(&self) -> RpcResult> { + unimplemented!() + } + + async fn list_hummock_pinned_snapshots(&self) -> RpcResult> { + unimplemented!() + } + + async fn get_hummock_current_version(&self) -> RpcResult { + unimplemented!() + } + + async fn get_hummock_checkpoint_version(&self) -> RpcResult { + unimplemented!() + } + + async fn list_version_deltas(&self) -> RpcResult> 
{ + unimplemented!() + } + + async fn list_branched_objects(&self) -> RpcResult> { + unimplemented!() + } + + async fn list_hummock_compaction_group_configs(&self) -> RpcResult> { + unimplemented!() + } + + async fn list_hummock_active_write_limits(&self) -> RpcResult> { + unimplemented!() + } + + async fn list_hummock_meta_configs(&self) -> RpcResult> { + unimplemented!() + } } #[cfg(test)] diff --git a/src/frontend/src/utils/condition.rs b/src/frontend/src/utils/condition.rs index 57910083dfbaf..5305f9f1f356a 100644 --- a/src/frontend/src/utils/condition.rs +++ b/src/frontend/src/utils/condition.rs @@ -844,7 +844,7 @@ impl Condition { .simplify() } - pub fn visit_expr + ?Sized>(&self, visitor: &mut V) -> R { + pub fn visit_expr(&self, visitor: &mut V) -> V::Result { self.conjunctions .iter() .map(|expr| visitor.visit_expr(expr)) diff --git a/src/frontend/src/utils/infer_stmt_row_desc.rs b/src/frontend/src/utils/infer_stmt_row_desc.rs new file mode 100644 index 0000000000000..fdc4b7c460fc2 --- /dev/null +++ b/src/frontend/src/utils/infer_stmt_row_desc.rs @@ -0,0 +1,165 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use pgwire::pg_field_descriptor::PgFieldDescriptor; +use risingwave_common::types::DataType; +use risingwave_sqlparser::ast::ShowObject; + +/// `infer_stmt_row_desc` is used to infer the row description for different show objects. 
+pub fn infer_show_object(objects: &ShowObject) -> Vec { + match objects { + ShowObject::Columns { .. } => vec![ + PgFieldDescriptor::new( + "Name".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Type".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Is Hidden".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + ShowObject::Connection { .. } => vec![ + PgFieldDescriptor::new( + "Name".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Type".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Properties".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + ShowObject::Function { .. } => vec![ + PgFieldDescriptor::new( + "Name".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Arguments".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Return Type".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Language".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Link".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + ShowObject::Indexes { .. 
} => vec![ + PgFieldDescriptor::new( + "Name".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "On".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Key".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Include".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Distributed By".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + ShowObject::Cluster => vec![ + PgFieldDescriptor::new( + "Addr".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "State".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Parallel Units".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Is Streaming".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Is Serving".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Is Unschedulable".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + ShowObject::Jobs => vec![ + PgFieldDescriptor::new( + "Id".to_owned(), + DataType::Int64.to_oid(), + DataType::Int64.type_len(), + ), + PgFieldDescriptor::new( + "Statement".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + PgFieldDescriptor::new( + "Progress".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + ), + ], + _ => vec![PgFieldDescriptor::new( + "Name".to_owned(), + DataType::Varchar.to_oid(), + DataType::Varchar.type_len(), + )], + } +} diff --git a/src/frontend/src/utils/mod.rs b/src/frontend/src/utils/mod.rs index e827f1321bbd9..3951da525bb03 100644 --- 
a/src/frontend/src/utils/mod.rs +++ b/src/frontend/src/utils/mod.rs @@ -30,6 +30,8 @@ pub use rewrite_index::*; mod index_set; pub use index_set::*; pub(crate) mod group_by; +pub mod infer_stmt_row_desc; + pub use group_by::*; use crate::expr::{Expr, ExprImpl, ExprRewriter, InputRef}; diff --git a/src/frontend/src/utils/stream_graph_formatter.rs b/src/frontend/src/utils/stream_graph_formatter.rs index 28cab1380dfd0..2e9e6d1bb01ec 100644 --- a/src/frontend/src/utils/stream_graph_formatter.rs +++ b/src/frontend/src/utils/stream_graph_formatter.rs @@ -163,14 +163,20 @@ impl StreamGraphFormatter { self.pretty_add_table(source.get_state_table().unwrap()), )); } + stream_node::NodeBody::StreamFsFetch(node) if let Some(fetch) = &node.node_inner => { + fields.push(( + "fs fetch state table", + self.pretty_add_table(fetch.get_state_table().unwrap()), + )) + } stream_node::NodeBody::Materialize(node) => fields.push(( "materialized table", self.pretty_add_table(node.get_table().unwrap()), )), stream_node::NodeBody::SimpleAgg(inner) => { fields.push(( - "result table", - self.pretty_add_table(inner.get_result_table().unwrap()), + "intermediate state table", + self.pretty_add_table(inner.get_intermediate_state_table().unwrap()), )); fields.push(("state tables", self.call_states(&inner.agg_call_states))); fields.push(( @@ -180,8 +186,8 @@ impl StreamGraphFormatter { } stream_node::NodeBody::HashAgg(inner) => { fields.push(( - "result table", - self.pretty_add_table(inner.get_result_table().unwrap()), + "intermediate state table", + self.pretty_add_table(inner.get_intermediate_state_table().unwrap()), )); fields.push(("state tables", self.call_states(&inner.agg_call_states))); fields.push(( @@ -304,6 +310,7 @@ impl StreamGraphFormatter { stream_node::NodeBody::BarrierRecv(_) | stream_node::NodeBody::Values(_) | stream_node::NodeBody::Source(_) | + stream_node::NodeBody::StreamFsFetch(_) | stream_node::NodeBody::NoOp(_) => {} }; diff --git a/src/frontend/src/utils/with_options.rs 
b/src/frontend/src/utils/with_options.rs index bf17ace34119b..4b0a70ef856dc 100644 --- a/src/frontend/src/utils/with_options.rs +++ b/src/frontend/src/utils/with_options.rs @@ -18,6 +18,9 @@ use std::num::NonZeroU32; use itertools::Itertools; use risingwave_common::error::{ErrorCode, Result as RwResult, RwError}; +use risingwave_connector::source::kafka::{ + insert_privatelink_broker_rewrite_map, PRIVATELINK_ENDPOINT_KEY, +}; use risingwave_connector::source::KAFKA_CONNECTOR; use risingwave_sqlparser::ast::{ CompatibleSourceSchema, CreateConnectionStatement, CreateSinkStatement, CreateSourceStatement, @@ -58,6 +61,10 @@ impl WithOptions { } } + pub fn from_inner(inner: BTreeMap) -> Self { + Self { inner } + } + /// Get the reference of the inner map. pub fn inner(&self) -> &BTreeMap { &self.inner @@ -122,13 +129,27 @@ fn is_kafka_connector(with_options: &WithOptions) -> bool { connector == KAFKA_CONNECTOR } -pub(crate) fn resolve_connection_in_with_option( +pub(crate) fn resolve_privatelink_in_with_option( with_options: &mut WithOptions, schema_name: &Option, session: &SessionImpl, ) -> RwResult> { - let connection_name = get_connection_name(with_options); let is_kafka = is_kafka_connector(with_options); + let privatelink_endpoint = with_options.get(PRIVATELINK_ENDPOINT_KEY).cloned(); + + // if `privatelink.endpoint` is provided in WITH, use it to rewrite broker address directly + if let Some(endpoint) = privatelink_endpoint { + if !is_kafka { + return Err(RwError::from(ErrorCode::ProtocolError( + "Privatelink is only supported in kafka connector".to_string(), + ))); + } + insert_privatelink_broker_rewrite_map(with_options.inner_mut(), None, Some(endpoint)) + .map_err(RwError::from)?; + return Ok(None); + } + + let connection_name = get_connection_name(with_options); let connection_id = match connection_name { Some(connection_name) => { let connection = session diff --git a/src/java_binding/Cargo.toml b/src/java_binding/Cargo.toml index d8d90693f44a6..477f19878cbd9 
100644 --- a/src/java_binding/Cargo.toml +++ b/src/java_binding/Cargo.toml @@ -10,7 +10,8 @@ ignored = ["workspace-hack"] normal = ["workspace-hack"] [dependencies] -prost = "0.11" +jni = "0.21.1" +prost = { workspace = true } risingwave_common = { workspace = true } risingwave_jni_core = { workspace = true } risingwave_pb = { workspace = true } diff --git a/src/java_binding/gen-demo-insert-data.py b/src/java_binding/gen-demo-insert-data.py index 6ffc79077eb82..716d0aadd1305 100644 --- a/src/java_binding/gen-demo-insert-data.py +++ b/src/java_binding/gen-demo-insert-data.py @@ -1,5 +1,20 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import sys + def gen_row(index): v1 = int(index) v2 = int(index) @@ -7,15 +22,15 @@ def gen_row(index): v4 = float(index) v5 = float(index) v6 = index % 3 == 0 - v7 = '\'' + str(index) * ((index % 10) + 1) + '\'' + v7 = "'" + str(index) * ((index % 10) + 1) + "'" v8 = "to_timestamp(" + str(index) + ")" v9 = index may_null = None if index % 5 == 0 else int(index) row_data = [v1, v2, v3, v4, v5, v6, v7, v8, v9, may_null] - repr = [str(o) if o is not None else 'null' for o in row_data] - return '(' + ', '.join(repr) + ')' + repr = [str(o) if o is not None else "null" for o in row_data] + return "(" + ", ".join(repr) + ")" data_size = int(sys.argv[1]) data = [gen_row(i) for i in range(data_size)] -print(', '.join(data)) +print(", ".join(data)) diff --git a/src/java_binding/make-java-binding.toml b/src/java_binding/make-java-binding.toml index 3be65ec2158a6..af76a18ba9b45 100644 --- a/src/java_binding/make-java-binding.toml +++ b/src/java_binding/make-java-binding.toml @@ -15,7 +15,7 @@ script = ''' #!/usr/bin/env bash set -ex cd java -mvn install --no-transfer-progress --pl java-binding-integration-test --am -DskipTests=true +mvn install --no-transfer-progress --pl java-binding-integration-test --am -DskipTests=true -Dmaven.javadoc.skip mvn dependency:copy-dependencies --no-transfer-progress --pl java-binding-integration-test ''' @@ -98,9 +98,6 @@ cd ${RISINGWAVE_ROOT}/java [tasks.run-java-binding-stream-chunk-benchmark] category = "RiseDev - Java Binding" description = "Run the java binding stream chunk benchmark" -dependencies = [ - "build-java-binding", -] script = ''' #!/usr/bin/env bash set -ex @@ -109,10 +106,10 @@ RISINGWAVE_ROOT=$(git rev-parse --show-toplevel) cd ${RISINGWAVE_ROOT}/java -mvn install --pl java-binding-benchmark --am -DskipTests=true +mvn install --pl java-binding-benchmark --am -DskipTests=true -Dmaven.javadoc.skip mvn dependency:copy-dependencies --pl java-binding-benchmark -java -cp 
"java-binding-benchmark/target/dependency/*:java-binding-benchmark/target/java-binding-benchmark-1.0-SNAPSHOT.jar" \ +java -cp "java-binding-benchmark/target/dependency/*:java-binding-benchmark/target/java-binding-benchmark-0.1.0-SNAPSHOT.jar" \ com.risingwave.java.binding.BenchmarkRunner ''' diff --git a/src/java_binding/src/lib.rs b/src/java_binding/src/lib.rs index 12a3c59fc829f..6edf4d29ce557 100644 --- a/src/java_binding/src/lib.rs +++ b/src/java_binding/src/lib.rs @@ -12,9 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![feature(error_generic_member_access)] -#![feature(lazy_cell)] -#![feature(once_cell_try)] -#![feature(type_alias_impl_trait)] +#![feature(result_option_inspect)] -pub use risingwave_jni_core::*; +use std::ffi::c_void; + +use jni::sys::{jint, JNI_VERSION_1_2}; +use jni::JavaVM; +use risingwave_jni_core::register_native_method_for_jvm; + +#[no_mangle] +#[allow(non_snake_case)] +pub extern "system" fn JNI_OnLoad(jvm: JavaVM, _reserved: *mut c_void) -> jint { + let _ = register_native_method_for_jvm(&jvm) + .inspect_err(|_e| eprintln!("unable to register native method")); + JNI_VERSION_1_2 +} diff --git a/src/jni_core/Cargo.toml b/src/jni_core/Cargo.toml index c8bba371c8dea..77cafd155000d 100644 --- a/src/jni_core/Cargo.toml +++ b/src/jni_core/Cargo.toml @@ -10,11 +10,14 @@ ignored = ["workspace-hack"] normal = ["workspace-hack"] [dependencies] +anyhow = "1" bytes = "1" +cfg-or-panic = "0.2" futures = { version = "0.3", default-features = false, features = ["alloc"] } itertools = "0.11" jni = "0.21.1" -prost = "0.11" +paste = "1" +prost = { workspace = true } risingwave_common = { workspace = true } risingwave_hummock_sdk = { workspace = true } risingwave_object_store = { workspace = true } diff --git a/src/jni_core/src/hummock_iterator.rs b/src/jni_core/src/hummock_iterator.rs index 92bb09885f960..a8a79f4b48c45 100644 --- a/src/jni_core/src/hummock_iterator.rs +++ 
b/src/jni_core/src/hummock_iterator.rs @@ -29,8 +29,8 @@ use risingwave_pb::java_binding::key_range::Bound; use risingwave_pb::java_binding::{KeyRange, ReadPlan}; use risingwave_storage::error::{StorageError, StorageResult}; use risingwave_storage::hummock::local_version::pinned_version::PinnedVersion; -use risingwave_storage::hummock::store::state_store::HummockStorageIterator; use risingwave_storage::hummock::store::version::HummockVersionReader; +use risingwave_storage::hummock::store::HummockStorageIterator; use risingwave_storage::hummock::{CachePolicy, FileCache, SstableStore}; use risingwave_storage::monitor::HummockStateStoreMetrics; use risingwave_storage::row_serde::value_serde::ValueRowSerdeNew; @@ -48,22 +48,6 @@ fn select_all_vnode_stream( pub struct HummockJavaBindingIterator { row_serde: EitherSerde, stream: SelectAllIterStream, - pub class_cache: Arc, -} - -pub struct KeyedRow { - key: Bytes, - row: OwnedRow, -} - -impl KeyedRow { - pub fn key(&self) -> &[u8] { - self.key.as_ref() - } - - pub fn row(&self) -> &OwnedRow { - &self.row - } } impl HummockJavaBindingIterator { @@ -85,6 +69,7 @@ impl HummockJavaBindingIterator { 0, FileCache::none(), FileCache::none(), + None, )); let reader = HummockVersionReader::new(sstable_store, Arc::new(HummockStateStoreMetrics::unused())); @@ -136,24 +121,20 @@ impl HummockJavaBindingIterator { .into() }; - Ok(Self { - row_serde, - stream, - class_cache: Default::default(), - }) + Ok(Self { row_serde, stream }) } - pub async fn next(&mut self) -> StorageResult> { + pub async fn next(&mut self) -> StorageResult> { let item = self.stream.try_next().await?; Ok(match item { - Some((key, value)) => Some(KeyedRow { - key: key.user_key.table_key.0, - row: OwnedRow::new( + Some((key, value)) => Some(( + key.user_key.table_key.0, + OwnedRow::new( self.row_serde .deserialize(&value) .map_err(StorageError::DeserializeRow)?, ), - }), + )), None => None, }) } diff --git a/src/jni_core/src/jvm_runtime.rs 
b/src/jni_core/src/jvm_runtime.rs new file mode 100644 index 0000000000000..bd1f068b6eaee --- /dev/null +++ b/src/jni_core/src/jvm_runtime.rs @@ -0,0 +1,200 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::option::Option::Some; +use std::ffi::c_void; +use std::fs; +use std::path::Path; +use std::sync::OnceLock; + +use jni::objects::JValueOwned; +use jni::strings::JNIString; +use jni::{InitArgsBuilder, JNIVersion, JavaVM, NativeMethod}; +use risingwave_common::error::{ErrorCode, RwError}; +use risingwave_common::util::resource_util::memory::system_memory_available_bytes; + +/// Use 10% of compute total memory by default. Compute node uses 0.7 * system memory by default. 
+const DEFAULT_MEMORY_PROPORTION: f64 = 0.07; + +pub static JVM: JavaVmWrapper = JavaVmWrapper::new(); + +pub struct JavaVmWrapper(OnceLock>); + +impl JavaVmWrapper { + const fn new() -> Self { + Self(OnceLock::new()) + } + + pub fn get(&self) -> Option<&Result> { + self.0.get() + } + + pub fn get_or_init(&self) -> Result<&JavaVM, &RwError> { + self.0.get_or_init(Self::inner_new).as_ref() + } + + fn inner_new() -> Result { + let libs_path = if let Ok(libs_path) = std::env::var("CONNECTOR_LIBS_PATH") { + libs_path + } else { + return Err(ErrorCode::InternalError( + "environment variable CONNECTOR_LIBS_PATH is not specified".to_string(), + ) + .into()); + }; + + let dir = Path::new(&libs_path); + + if !dir.is_dir() { + return Err(ErrorCode::InternalError(format!( + "CONNECTOR_LIBS_PATH \"{}\" is not a directory", + libs_path + )) + .into()); + } + + let mut class_vec = vec![]; + + if let Ok(entries) = fs::read_dir(dir) { + for entry in entries.flatten() { + let entry_path = entry.path(); + if entry_path.file_name().is_some() { + let path = std::fs::canonicalize(entry_path)?; + class_vec.push(path.to_str().unwrap().to_string()); + } + } + } else { + return Err(ErrorCode::InternalError(format!( + "failed to read CONNECTOR_LIBS_PATH \"{}\"", + libs_path + )) + .into()); + } + + let jvm_heap_size = if let Ok(heap_size) = std::env::var("JVM_HEAP_SIZE") { + heap_size + } else { + format!( + "{}", + (system_memory_available_bytes() as f64 * DEFAULT_MEMORY_PROPORTION) as usize + ) + }; + + // Build the VM properties + let args_builder = InitArgsBuilder::new() + // Pass the JNI API version (default is 8) + .version(JNIVersion::V8) + .option("-Dis_embedded_connector=true") + .option(format!("-Djava.class.path={}", class_vec.join(":"))) + .option("-Xms16m") + .option(format!("-Xmx{}", jvm_heap_size)); + + tracing::info!("JVM args: {:?}", args_builder); + let jvm_args = args_builder.build().unwrap(); + + // Create a new VM + let jvm = match JavaVM::new(jvm_args) { + Err(err) => 
{ + tracing::error!("fail to new JVM {:?}", err); + return Err(ErrorCode::InternalError("fail to new JVM".to_string()).into()); + } + Ok(jvm) => jvm, + }; + + tracing::info!("initialize JVM successfully"); + + register_native_method_for_jvm(&jvm).unwrap(); + + Ok(jvm) + } +} + +pub fn register_native_method_for_jvm(jvm: &JavaVM) -> Result<(), jni::errors::Error> { + let mut env = jvm + .attach_current_thread() + .inspect_err(|e| tracing::error!("jvm attach thread error: {:?}", e)) + .unwrap(); + + let binding_class = env + .find_class("com/risingwave/java/binding/Binding") + .inspect_err(|e| tracing::error!("jvm find class error: {:?}", e)) + .unwrap(); + use crate::*; + macro_rules! gen_native_method_array { + () => {{ + $crate::for_all_native_methods! {gen_native_method_array} + }}; + ({$({ $func_name:ident, {$($ret:tt)+}, {$($args:tt)*} }),*}) => { + [ + $( + { + let fn_ptr = paste::paste! {[ ]} as *mut c_void; + let sig = $crate::gen_jni_sig! { $($ret)+ ($($args)*)}; + NativeMethod { + name: JNIString::from(stringify! {$func_name}), + sig: JNIString::from(sig), + fn_ptr, + } + }, + )* + ] + } + } + env.register_native_methods(binding_class, &gen_native_method_array!()) + .inspect_err(|e| tracing::error!("jvm register native methods error: {:?}", e))?; + + tracing::info!("register native methods for jvm successfully"); + Ok(()) +} + +/// Load JVM memory statistics from the runtime. If JVM is not initialized or fail to initialize, return zero. 
+pub fn load_jvm_memory_stats() -> (usize, usize) { + if let Some(jvm) = JVM.get() { + match jvm { + Ok(jvm) => { + let mut env = jvm.attach_current_thread().unwrap(); + let runtime_instance = env + .call_static_method( + "java/lang/Runtime", + "getRuntime", + "()Ljava/lang/Runtime;", + &[], + ) + .unwrap(); + + let runtime_instance = match runtime_instance { + JValueOwned::Object(o) => o, + _ => unreachable!(), + }; + + let total_memory = env + .call_method(runtime_instance.as_ref(), "totalMemory", "()J", &[]) + .unwrap() + .j() + .unwrap(); + + let free_memory = env + .call_method(runtime_instance, "freeMemory", "()J", &[]) + .unwrap() + .j() + .unwrap(); + + (total_memory as usize, (total_memory - free_memory) as usize) + } + Err(_) => (0, 0), + } + } else { + (0, 0) + } +} diff --git a/src/jni_core/src/lib.rs b/src/jni_core/src/lib.rs index be350ae57a460..4815cd7368370 100644 --- a/src/jni_core/src/lib.rs +++ b/src/jni_core/src/lib.rs @@ -19,21 +19,26 @@ #![feature(result_option_inspect)] pub mod hummock_iterator; +pub mod jvm_runtime; +mod macros; pub mod stream_chunk_iterator; use std::backtrace::Backtrace; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use std::slice::from_raw_parts; -use std::sync::{Arc, LazyLock, OnceLock}; +use std::sync::{LazyLock, OnceLock}; -use hummock_iterator::{HummockJavaBindingIterator, KeyedRow}; +use bytes::Bytes; +use cfg_or_panic::cfg_or_panic; use jni::objects::{ AutoElements, GlobalRef, JByteArray, JClass, JMethodID, JObject, JStaticMethodID, JString, JValue, JValueGen, JValueOwned, ReleaseMode, }; use jni::signature::ReturnType; -use jni::sys::{jboolean, jbyte, jdouble, jfloat, jint, jlong, jshort, jsize, jvalue}; +use jni::sys::{ + jboolean, jbyte, jdouble, jfloat, jint, jlong, jshort, jsize, jvalue, JNI_FALSE, JNI_TRUE, +}; use jni::JNIEnv; use prost::{DecodeError, Message}; use risingwave_common::array::{ArrayError, StreamChunk}; @@ -42,19 +47,25 @@ use risingwave_common::row::{OwnedRow, Row}; use 
risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::ScalarRefImpl; use risingwave_common::util::panic::rw_catch_unwind; -use risingwave_pb::connector_service::GetEventStreamResponse; +use risingwave_pb::connector_service::{ + GetEventStreamResponse, SinkCoordinatorStreamRequest, SinkCoordinatorStreamResponse, + SinkWriterStreamRequest, SinkWriterStreamResponse, +}; +use risingwave_pb::data::Op; use risingwave_storage::error::StorageError; use thiserror::Error; use tokio::runtime::Runtime; -use tokio::sync::mpsc::Sender; +use tokio::sync::mpsc::{Receiver, Sender}; -use crate::stream_chunk_iterator::{StreamChunkIterator, StreamChunkRow}; +use crate::hummock_iterator::HummockJavaBindingIterator; +pub use crate::jvm_runtime::register_native_method_for_jvm; +use crate::stream_chunk_iterator::{into_iter, StreamChunkRowIterator}; pub type GetEventStreamJniSender = Sender; static RUNTIME: LazyLock = LazyLock::new(|| tokio::runtime::Runtime::new().unwrap()); #[derive(Error, Debug)] -enum BindingError { +pub enum BindingError { #[error("JniError {error}")] Jni { #[from] @@ -86,7 +97,7 @@ enum BindingError { type Result = std::result::Result; -fn to_guarded_slice<'array, 'env>( +pub fn to_guarded_slice<'array, 'env>( array: &'array JByteArray<'env>, env: &'array mut JNIEnv<'env>, ) -> Result> { @@ -139,15 +150,6 @@ impl From for Pointer<'static, T> { } } -impl Pointer<'static, T> { - fn null() -> Self { - Pointer { - pointer: 0, - _phantom: PhantomData, - } - } -} - impl<'a, T> Pointer<'a, T> { fn as_ref(&self) -> &'a T { debug_assert!(self.pointer != 0); @@ -224,12 +226,8 @@ where } } -pub enum JavaBindingRowInner { - Keyed(KeyedRow), - StreamChunk(StreamChunkRow), -} #[derive(Default)] -pub struct JavaClassMethodCache { +struct JavaClassMethodCache { big_decimal_ctor: OnceLock<(GlobalRef, JMethodID)>, timestamp_ctor: OnceLock<(GlobalRef, JMethodID)>, @@ -237,175 +235,209 @@ pub struct JavaClassMethodCache { time_ctor: OnceLock<(GlobalRef, 
JStaticMethodID)>, } -pub struct JavaBindingRow { - inner: JavaBindingRowInner, - class_cache: Arc, +enum JavaBindingIteratorInner { + Hummock(HummockJavaBindingIterator), + StreamChunk(StreamChunkRowIterator), } -impl JavaBindingRow { - fn with_stream_chunk( - underlying: StreamChunkRow, - class_cache: Arc, - ) -> Self { - Self { - inner: JavaBindingRowInner::StreamChunk(underlying), - class_cache, - } - } +enum RowExtra { + Op(Op), + Key(Bytes), +} - fn with_keyed(underlying: KeyedRow, class_cache: Arc) -> Self { - Self { - inner: JavaBindingRowInner::Keyed(underlying), - class_cache, +impl RowExtra { + fn as_op(&self) -> Op { + match self { + RowExtra::Op(op) => *op, + RowExtra::Key(_) => unreachable!("should be op"), } } - fn as_keyed(&self) -> &KeyedRow { - match &self.inner { - JavaBindingRowInner::Keyed(r) => r, - _ => unreachable!("can only call as_keyed for KeyedRow"), + fn as_key(&self) -> &Bytes { + match self { + RowExtra::Key(key) => key, + RowExtra::Op(_) => unreachable!("should be key"), } } +} - fn as_stream_chunk(&self) -> &StreamChunkRow { - match &self.inner { - JavaBindingRowInner::StreamChunk(r) => r, - _ => unreachable!("can only call as_stream_chunk for StreamChunkRow"), - } - } +struct RowCursor { + row: OwnedRow, + extra: RowExtra, } -impl Deref for JavaBindingRow { +struct JavaBindingIterator { + inner: JavaBindingIteratorInner, + cursor: Option, + class_cache: JavaClassMethodCache, +} + +impl Deref for JavaBindingIterator { type Target = OwnedRow; fn deref(&self) -> &Self::Target { - match &self.inner { - JavaBindingRowInner::Keyed(r) => r.row(), - JavaBindingRowInner::StreamChunk(r) => r.row(), - } + &self + .cursor + .as_ref() + .expect("should exist when call row methods") + .row } } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_vnodeCount( - _env: EnvParam<'_>, -) -> jint { +extern "system" fn Java_com_risingwave_java_binding_Binding_vnodeCount(_env: EnvParam<'_>) -> jint { VirtualNode::COUNT as jint } 
+#[cfg_or_panic(not(madsim))] #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_hummockIteratorNew<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorNewHummock<'a>( env: EnvParam<'a>, read_plan: JByteArray<'a>, -) -> Pointer<'static, HummockJavaBindingIterator> { +) -> Pointer<'static, JavaBindingIterator> { execute_and_catch(env, move |env| { let read_plan = Message::decode(to_guarded_slice(&read_plan, env)?.deref())?; let iter = RUNTIME.block_on(HummockJavaBindingIterator::new(read_plan))?; + let iter = JavaBindingIterator { + inner: JavaBindingIteratorInner::Hummock(iter), + cursor: None, + class_cache: Default::default(), + }; Ok(iter.into()) }) } +#[cfg_or_panic(not(madsim))] #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_hummockIteratorNext<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorNext<'a>( env: EnvParam<'a>, - mut pointer: Pointer<'a, HummockJavaBindingIterator>, -) -> Pointer<'static, JavaBindingRow> { + mut pointer: Pointer<'a, JavaBindingIterator>, +) -> jboolean { execute_and_catch(env, move |_env| { let iter = pointer.as_mut(); - match RUNTIME.block_on(iter.next())? { - None => Ok(Pointer::null()), - Some(row) => Ok(JavaBindingRow::with_keyed(row, iter.class_cache.clone()).into()), + match &mut iter.inner { + JavaBindingIteratorInner::Hummock(ref mut hummock_iter) => { + match RUNTIME.block_on(hummock_iter.next())? 
{ + None => { + iter.cursor = None; + Ok(JNI_FALSE) + } + Some((key, row)) => { + iter.cursor = Some(RowCursor { + row, + extra: RowExtra::Key(key), + }); + Ok(JNI_TRUE) + } + } + } + JavaBindingIteratorInner::StreamChunk(ref mut stream_chunk_iter) => { + match stream_chunk_iter.next() { + None => { + iter.cursor = None; + Ok(JNI_FALSE) + } + Some((op, row)) => { + iter.cursor = Some(RowCursor { + row, + extra: RowExtra::Op(op), + }); + Ok(JNI_TRUE) + } + } + } } }) } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_hummockIteratorClose( - _env: EnvParam<'_>, - pointer: Pointer<'_, HummockJavaBindingIterator>, +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorClose<'a>( + _env: EnvParam<'a>, + pointer: Pointer<'a, JavaBindingIterator>, ) { - pointer.drop(); + pointer.drop() } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_streamChunkIteratorNew<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorNewFromStreamChunkPayload< + 'a, +>( env: EnvParam<'a>, stream_chunk_payload: JByteArray<'a>, -) -> Pointer<'static, StreamChunkIterator> { +) -> Pointer<'static, JavaBindingIterator> { execute_and_catch(env, move |env| { let prost_stream_chumk = Message::decode(to_guarded_slice(&stream_chunk_payload, env)?.deref())?; - let iter = StreamChunkIterator::new(StreamChunk::from_protobuf(&prost_stream_chumk)?); + let iter = into_iter(StreamChunk::from_protobuf(&prost_stream_chumk)?); + let iter = JavaBindingIterator { + inner: JavaBindingIteratorInner::StreamChunk(iter), + cursor: None, + class_cache: Default::default(), + }; Ok(iter.into()) }) } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_streamChunkIteratorFromPretty< - 'a, ->( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorNewFromStreamChunkPretty<'a>( env: EnvParam<'a>, str: JString<'a>, -) -> Pointer<'static, StreamChunkIterator> { +) -> Pointer<'static, 
JavaBindingIterator> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { - let iter = StreamChunkIterator::new(StreamChunk::from_pretty( + let iter = into_iter(StreamChunk::from_pretty( env.get_string(&str) .expect("cannot get java string") .to_str() .unwrap(), )); + let iter = JavaBindingIterator { + inner: JavaBindingIteratorInner::StreamChunk(iter), + cursor: None, + class_cache: Default::default(), + }; Ok(iter.into()) }) } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_streamChunkIteratorNext<'a>( - env: EnvParam<'a>, - mut pointer: Pointer<'a, StreamChunkIterator>, -) -> Pointer<'static, JavaBindingRow> { - execute_and_catch(env, move |_env| { - let iter = pointer.as_mut(); - match iter.next() { - None => Ok(Pointer::null()), - Some(row) => { - Ok(JavaBindingRow::with_stream_chunk(row, iter.class_cache.clone()).into()) - } - } - }) -} - -#[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_streamChunkIteratorClose( - _env: EnvParam<'_>, - pointer: Pointer<'_, StreamChunkIterator>, -) { - pointer.drop(); -} - -#[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetKey<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetKey<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, ) -> JByteArray<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { - Ok(env.byte_array_from_slice(pointer.as_ref().as_keyed().key())?) + Ok(env.byte_array_from_slice( + pointer + .as_ref() + .cursor + .as_ref() + .expect("should exists when call get key") + .extra + .as_key() + .as_ref(), + )?) 
}) } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetOp<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetOp<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, ) -> jint { execute_and_catch(env, move |_env| { - Ok(pointer.as_ref().as_stream_chunk().op() as jint) + Ok(pointer + .as_ref() + .cursor + .as_ref() + .expect("should exist when call get op") + .extra + .as_op() as jint) }) } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowIsNull<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorIsNull<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jboolean { execute_and_catch(env, move |_env| { @@ -414,9 +446,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowIsNull<'a>( } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt16Value<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetInt16Value<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jshort { execute_and_catch(env, move |_env| { @@ -429,9 +461,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt16Value } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt32Value<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetInt32Value<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jint { execute_and_catch(env, move |_env| { @@ -444,9 +476,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt32Value } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt64Value<'a>( +extern "system" fn 
Java_com_risingwave_java_binding_Binding_iteratorGetInt64Value<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jlong { execute_and_catch(env, move |_env| { @@ -459,9 +491,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetInt64Value } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetFloatValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetFloatValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jfloat { execute_and_catch(env, move |_env| { @@ -475,9 +507,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetFloatValue } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDoubleValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetDoubleValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jdouble { execute_and_catch(env, move |_env| { @@ -491,9 +523,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDoubleValu } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetBooleanValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetBooleanValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> jboolean { execute_and_catch(env, move |_env| { @@ -502,9 +534,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetBooleanVal } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetStringValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetStringValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, 
JavaBindingIterator>, idx: jint, ) -> JString<'a> { execute_and_catch(env, move |env: &mut EnvParam<'a>| { @@ -513,9 +545,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetStringValu } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetIntervalValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetIntervalValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JString<'a> { execute_and_catch(env, move |env: &mut EnvParam<'a>| { @@ -530,9 +562,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetIntervalVa } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetJsonbValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetJsonbValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JString<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -547,9 +579,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetJsonbValue } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetTimestampValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetTimestampValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JObject<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -579,9 +611,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetTimestampV } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDecimalValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetDecimalValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JObject<'a> { 
execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -616,9 +648,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDecimalVal } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDateValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetDateValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JObject<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -662,9 +694,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetDateValue< } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetTimeValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetTimeValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JObject<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -708,9 +740,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetTimeValue< } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetByteaValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetByteaValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, ) -> JByteArray<'a> { execute_and_catch(env, move |env: &mut EnvParam<'_>| { @@ -724,9 +756,9 @@ pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetByteaValue } #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowGetArrayValue<'a>( +extern "system" fn Java_com_risingwave_java_binding_Binding_iteratorGetArrayValue<'a>( env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, + pointer: Pointer<'a, JavaBindingIterator>, idx: jint, class: JClass<'a>, ) -> JObject<'a> { @@ -814,27 +846,137 @@ pub extern "system" fn 
Java_com_risingwave_java_binding_Binding_rowGetArrayValue }) } +/// Send messages to the channel received by `CdcSplitReader`. +/// If msg is null, just check whether the channel is closed. +/// Return true if sending is successful, otherwise, return false so that caller can stop +/// gracefully. #[no_mangle] -pub extern "system" fn Java_com_risingwave_java_binding_Binding_rowClose<'a>( - _env: EnvParam<'a>, - pointer: Pointer<'a, JavaBindingRow>, -) { - pointer.drop() +extern "system" fn Java_com_risingwave_java_binding_Binding_sendCdcSourceMsgToChannel<'a>( + env: EnvParam<'a>, + channel: Pointer<'a, GetEventStreamJniSender>, + msg: JByteArray<'a>, +) -> jboolean { + execute_and_catch(env, move |env| { + // If msg is null means just check whether channel is closed. + if msg.is_null() { + if channel.as_ref().is_closed() { + return Ok(JNI_FALSE); + } else { + return Ok(JNI_TRUE); + } + } + + let get_event_stream_response: GetEventStreamResponse = + Message::decode(to_guarded_slice(&msg, env)?.deref())?; + + match channel.as_ref().blocking_send(get_event_stream_response) { + Ok(_) => Ok(JNI_TRUE), + Err(e) => { + tracing::info!("send error. 
{:?}", e); + Ok(JNI_FALSE) + } + } + }) +} + +#[no_mangle] +pub extern "system" fn Java_com_risingwave_java_binding_Binding_recvSinkWriterRequestFromChannel< + 'a, +>( + env: EnvParam<'a>, + mut channel: Pointer<'a, Receiver>, +) -> JByteArray<'a> { + execute_and_catch(env, move |env| match channel.as_mut().blocking_recv() { + Some(msg) => { + let bytes = env + .byte_array_from_slice(&Message::encode_to_vec(&msg)) + .unwrap(); + Ok(bytes) + } + None => Ok(JObject::null().into()), + }) +} + +#[no_mangle] +pub extern "system" fn Java_com_risingwave_java_binding_Binding_sendSinkWriterResponseToChannel< + 'a, +>( + env: EnvParam<'a>, + channel: Pointer<'a, Sender>>, + msg: JByteArray<'a>, +) -> jboolean { + execute_and_catch(env, move |env| { + let sink_writer_stream_response: SinkWriterStreamResponse = + Message::decode(to_guarded_slice(&msg, env)?.deref())?; + + match channel + .as_ref() + .blocking_send(Ok(sink_writer_stream_response)) + { + Ok(_) => Ok(JNI_TRUE), + Err(e) => { + tracing::info!("send error. 
{:?}", e); + Ok(JNI_FALSE) + } + } + }) +} + +#[no_mangle] +pub extern "system" fn Java_com_risingwave_java_binding_Binding_recvSinkCoordinatorRequestFromChannel< + 'a, +>( + env: EnvParam<'a>, + mut channel: Pointer<'a, Receiver>, +) -> JByteArray<'a> { + execute_and_catch(env, move |env| match channel.as_mut().blocking_recv() { + Some(msg) => { + let bytes = env + .byte_array_from_slice(&Message::encode_to_vec(&msg)) + .unwrap(); + Ok(bytes) + } + None => Ok(JObject::null().into()), + }) +} + +#[no_mangle] +pub extern "system" fn Java_com_risingwave_java_binding_Binding_sendSinkCoordinatorResponseToChannel< + 'a, +>( + env: EnvParam<'a>, + channel: Pointer<'a, Sender>, + msg: JByteArray<'a>, +) -> jboolean { + execute_and_catch(env, move |env| { + let sink_coordinator_stream_response: SinkCoordinatorStreamResponse = + Message::decode(to_guarded_slice(&msg, env)?.deref())?; + + match channel + .as_ref() + .blocking_send(sink_coordinator_stream_response) + { + Ok(_) => Ok(JNI_TRUE), + Err(e) => { + tracing::info!("send error. 
{:?}", e); + Ok(JNI_FALSE) + } + } + }) } #[cfg(test)] mod tests { - use risingwave_common::types::{DataType, Timestamptz}; - use risingwave_expr::vector_op::cast::literal_parsing; + use risingwave_common::types::Timestamptz; /// make sure that the [`ScalarRefImpl::Int64`] received by - /// [`Java_com_risingwave_java_binding_Binding_rowGetTimestampValue`] + /// [`Java_com_risingwave_java_binding_Binding_iteratorGetTimestampValue`] /// is of type [`DataType::Timestamptz`] stored in microseconds #[test] fn test_timestamptz_to_i64() { assert_eq!( - literal_parsing(&DataType::Timestamptz, "2023-06-01 09:45:00+08:00").unwrap(), - Timestamptz::from_micros(1_685_583_900_000_000).into() + "2023-06-01 09:45:00+08:00".parse::().unwrap(), + Timestamptz::from_micros(1_685_583_900_000_000) ); } } diff --git a/src/jni_core/src/macros.rs b/src/jni_core/src/macros.rs new file mode 100644 index 0000000000000..0ca0748fb0206 --- /dev/null +++ b/src/jni_core/src/macros.rs @@ -0,0 +1,353 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[macro_export] +macro_rules! gen_class_name { + ($last:ident) => { + stringify! {$last} + }; + ($first:ident . $($rest:ident).+) => { + concat! {stringify! {$first}, "/", gen_class_name! {$($rest).+} } + } +} + +#[macro_export] +macro_rules! gen_jni_sig_inner { + ($(public)? static native $($rest:tt)*) => { + gen_jni_sig_inner! { $($rest)* } + }; + ($($ret:ident).+ $($func_name:ident)? 
($($args:tt)*)) => { + concat! {"(", gen_jni_sig_inner!{$($args)*}, ")", gen_jni_sig_inner! {$($ret).+} } + }; + ($($ret:ident).+ [] $($func_name:ident)? ($($args:tt)*)) => { + concat! {"(", gen_jni_sig_inner!{$($args)*}, ")", gen_jni_sig_inner! {$($ret).+ []} } + }; + (boolean) => { + "Z" + }; + (byte) => { + "B" + }; + (char) => { + "C" + }; + (short) => { + "S" + }; + (int) => { + "I" + }; + (long) => { + "J" + }; + (float) => { + "F" + }; + (double) => { + "D" + }; + (void) => { + "V" + }; + (String) => { + gen_jni_sig_inner! { java.lang.String } + }; + (Object) => { + gen_jni_sig_inner! { java.lang.Object } + }; + (Class) => { + gen_jni_sig_inner! { java.lang.Class } + }; + ($($class_part:ident).+) => { + concat! {"L", gen_class_name! {$($class_part).+}, ";"} + }; + ($($class_part:ident).+ $(.)? [] $($param_name:ident)? $(,$($rest:tt)*)?) => { + concat! { "[", gen_jni_sig_inner! {$($class_part).+}, gen_jni_sig_inner! {$($($rest)*)?}} + }; + (Class $(< ? >)? $($param_name:ident)? $(,$($rest:tt)*)?) => { + concat! { gen_jni_sig_inner! { Class }, gen_jni_sig_inner! {$($($rest)*)?}} + }; + ($($class_part:ident).+ $($param_name:ident)? $(,$($rest:tt)*)?) => { + concat! { gen_jni_sig_inner! {$($class_part).+}, gen_jni_sig_inner! {$($($rest)*)?}} + }; + () => { + "" + }; + ($($invalid:tt)*) => { + compile_error!(concat!("unsupported type {{", stringify!($($invalid)*), "}}")) + }; +} + +#[macro_export] +macro_rules! gen_jni_sig { + ($($input:tt)*) => {{ + // this macro only provide with a expression context + gen_jni_sig_inner! {$($input)*} + }} +} + +#[macro_export] +macro_rules! for_all_plain_native_methods { + ($macro:path $(,$args:tt)*) => { + $macro! 
{ + { + public static native int vnodeCount(); + + // hummock iterator method + // Return a pointer to the iterator + static native long iteratorNewHummock(byte[] readPlan); + + static native boolean iteratorNext(long pointer); + + static native void iteratorClose(long pointer); + + static native long iteratorNewFromStreamChunkPayload(byte[] streamChunkPayload); + + static native long iteratorNewFromStreamChunkPretty(String str); + + static native byte[] iteratorGetKey(long pointer); + + static native int iteratorGetOp(long pointer); + + static native boolean iteratorIsNull(long pointer, int index); + + static native short iteratorGetInt16Value(long pointer, int index); + + static native int iteratorGetInt32Value(long pointer, int index); + + static native long iteratorGetInt64Value(long pointer, int index); + + static native float iteratorGetFloatValue(long pointer, int index); + + static native double iteratorGetDoubleValue(long pointer, int index); + + static native boolean iteratorGetBooleanValue(long pointer, int index); + + static native String iteratorGetStringValue(long pointer, int index); + + static native java.sql.Timestamp iteratorGetTimestampValue(long pointer, int index); + + static native java.math.BigDecimal iteratorGetDecimalValue(long pointer, int index); + + static native java.sql.Time iteratorGetTimeValue(long pointer, int index); + + static native java.sql.Date iteratorGetDateValue(long pointer, int index); + + static native String iteratorGetIntervalValue(long pointer, int index); + + static native String iteratorGetJsonbValue(long pointer, int index); + + static native byte[] iteratorGetByteaValue(long pointer, int index); + + // TODO: object or object array? 
+ static native Object iteratorGetArrayValue(long pointer, int index, Class clazz); + + public static native boolean sendCdcSourceMsgToChannel(long channelPtr, byte[] msg); + + public static native byte[] recvSinkWriterRequestFromChannel(long channelPtr); + + public static native boolean sendSinkWriterResponseToChannel(long channelPtr, byte[] msg); + + public static native byte[] recvSinkCoordinatorRequestFromChannel(long channelPtr); + + public static native boolean sendSinkCoordinatorResponseToChannel(long channelPtr, byte[] msg); + } + $(,$args)* + } + }; +} + +#[macro_export] +macro_rules! for_single_native_method { + ( + {$($ret:tt).+ $func_name:ident ($($args:tt)*)}, + $macro:path + $(,$extra_args:tt)* + ) => { + $macro! { + $func_name, + {$($ret).+}, + {$($args)*} + } + }; + ( + {$($ret:tt).+ [] $func_name:ident ($($args:tt)*)}, + $macro:path + $(,$extra_args:tt)* + ) => { + $macro! { + $func_name, + {$($ret).+ []}, + {$($args)*} + } + }; +} + +#[macro_export] +macro_rules! for_all_native_methods { + ( + {$($input:tt)*}, + $macro:path + $(,$extra_args:tt)* + ) => {{ + $crate::for_all_native_methods! { + {$($input)*}, + {}, + $macro + $(,$extra_args)* + } + }}; + ( + { + $(public)? static native $($ret:tt).+ $func_name:ident($($args:tt)*); $($rest:tt)* + }, + { + $({$prev_func_name:ident, {$($prev_ret:tt)*}, {$($prev_args:tt)*}})* + }, + $macro:path + $(,$extra_args:tt)* + ) => { + $crate::for_all_native_methods! { + {$($rest)*}, + { + $({$prev_func_name, {$($prev_ret)*}, {$($prev_args)*}})* + {$func_name, {$($ret).+}, {$($args)*}} + }, + $macro + $(,$extra_args)* + } + }; + ( + { + $(public)? static native $($ret:tt).+ [] $func_name:ident($($args:tt)*); $($rest:tt)* + }, + { + $({$prev_func_name:ident, {$($prev_ret:tt)*}, {$($prev_args:tt)*}})* + }, + $macro:path + $(,$extra_args:tt)* + ) => { + $crate::for_all_native_methods! 
{ + {$($rest)*}, + { + $({$prev_func_name, {$($prev_ret)*}, {$($prev_args)*}})* + {$func_name, {$($ret).+ []}, {$($args)*}} + }, + $macro + $(,$extra_args)* + } + }; + ( + {}, + { + $({$func_name:ident, {$($ret:tt)*}, {$($args:tt)*}})* + }, + $macro:path + $(,$extra_args:tt)* + ) => { + $macro! { + { + $({$func_name, {$($ret)*}, {$($args)*}}),* + } + $(,$extra_args)* + } + }; + ($macro:path $(,$args:tt)*) => {{ + $crate::for_all_plain_native_methods! { + $crate::for_all_native_methods, + $macro + $(,$args)* + } + }}; +} + +#[cfg(test)] +mod tests { + #[test] + fn test_gen_jni_sig() { + assert_eq!(gen_jni_sig!(int), "I"); + assert_eq!(gen_jni_sig!(boolean f(int, short, byte[])), "(IS[B)Z"); + assert_eq!( + gen_jni_sig!(boolean f(int, short, byte[], java.lang.String)), + "(IS[BLjava/lang/String;)Z" + ); + assert_eq!( + gen_jni_sig!(boolean f(int, java.lang.String)), + "(ILjava/lang/String;)Z" + ); + assert_eq!(gen_jni_sig!(public static native int vnodeCount()), "()I"); + assert_eq!( + gen_jni_sig!(long hummockIteratorNew(byte[] readPlan)), + "([B)J" + ); + assert_eq!(gen_jni_sig!(long hummockIteratorNext(long pointer)), "(J)J"); + assert_eq!( + gen_jni_sig!(void hummockIteratorClose(long pointer)), + "(J)V" + ); + assert_eq!(gen_jni_sig!(byte[] rowGetKey(long pointer)), "(J)[B"); + assert_eq!( + gen_jni_sig!(java.sql.Timestamp rowGetTimestampValue(long pointer, int index)), + "(JI)Ljava/sql/Timestamp;" + ); + assert_eq!( + gen_jni_sig!(String rowGetStringValue(long pointer, int index)), + "(JI)Ljava/lang/String;" + ); + assert_eq!( + gen_jni_sig!(static native Object rowGetArrayValue(long pointer, int index, Class clazz)), + "(JILjava/lang/Class;)Ljava/lang/Object;" + ); + } + + #[test] + fn test_for_all_gen() { + macro_rules! gen_array { + (test) => {{ + for_all_native_methods! 
{ + { + public static native int vnodeCount(); + static native long hummockIteratorNew(byte[] readPlan); + public static native byte[] rowGetKey(long pointer); + }, + gen_array + } + }}; + (all) => {{ + for_all_native_methods! { + gen_array + } + }}; + ({$({ $func_name:ident, {$($ret:tt)+}, {$($args:tt)*} }),*}) => {{ + [ + $( + (stringify! {$func_name}, gen_jni_sig! { $($ret)+ ($($args)*)}), + )* + ] + }}; + } + let sig: [(_, _); 3] = gen_array!(test); + assert_eq!( + sig, + [ + ("vnodeCount", "()I"), + ("hummockIteratorNew", "([B)J"), + ("rowGetKey", "(J)[B") + ] + ); + + let sig = gen_array!(all); + assert!(!sig.is_empty()); + } +} diff --git a/src/jni_core/src/stream_chunk_iterator.rs b/src/jni_core/src/stream_chunk_iterator.rs index d62117a0aa108..49d096d30339e 100644 --- a/src/jni_core/src/stream_chunk_iterator.rs +++ b/src/jni_core/src/stream_chunk_iterator.rs @@ -12,51 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::sync::Arc; - use itertools::Itertools; use risingwave_common::array::StreamChunk; use risingwave_common::row::{OwnedRow, Row}; use risingwave_pb::data::Op; -pub struct StreamChunkRow { - op: Op, - row: OwnedRow, -} - -impl StreamChunkRow { - pub fn op(&self) -> Op { - self.op - } - - pub fn row(&self) -> &OwnedRow { - &self.row - } -} - -type StreamChunkRowIterator = impl Iterator + 'static; - -pub struct StreamChunkIterator { - iter: StreamChunkRowIterator, - pub class_cache: Arc, -} - -impl StreamChunkIterator { - pub(crate) fn new(stream_chunk: StreamChunk) -> Self { - Self { - iter: stream_chunk - .rows() - .map(|(op, row_ref)| StreamChunkRow { - op: op.to_protobuf(), - row: row_ref.to_owned_row(), - }) - .collect_vec() - .into_iter(), - class_cache: Default::default(), - } - } +pub(crate) type StreamChunkRowIterator = impl Iterator + 'static; - pub(crate) fn next(&mut self) -> Option { - self.iter.next() - } +pub(crate) fn into_iter(stream_chunk: StreamChunk) -> StreamChunkRowIterator { + stream_chunk + .rows() + .map(|(op, row_ref)| (op.to_protobuf(), row_ref.to_owned_row())) + .collect_vec() + .into_iter() } diff --git a/src/meta/Cargo.toml b/src/meta/Cargo.toml index d8401cc5a7f71..3e96dfcc7be2f 100644 --- a/src/meta/Cargo.toml +++ b/src/meta/Cargo.toml @@ -20,6 +20,7 @@ assert_matches = "1" async-trait = "0.1" aws-config = { workspace = true } aws-sdk-ec2 = { workspace = true } +base64-url = { version = "2.0.0" } bytes = { version = "1", features = ["serde"] } clap = { version = "4", features = ["derive", "env"] } crepe = "0.1" @@ -35,18 +36,18 @@ hyper = "0.14" itertools = "0.11" memcomparable = { version = "0.2" } mime_guess = "2" +model_migration = { path = "src/model_v2/migration" } num-integer = "0.1" num-traits = "0.2" parking_lot = { version = "0.12", features = ["arc_lock"] } prometheus = "0.13" -prometheus-http-query = "0.6" -prost = "0.11" +prometheus-http-query = "0.7" +prost = { workspace = true } rand = "0.8" -regex = "1" reqwest = 
"0.11" risingwave_backup = { workspace = true } risingwave_common = { workspace = true } -risingwave_common_service = { workspace = true } +risingwave_common_heap_profiling = { workspace = true } risingwave_connector = { workspace = true } risingwave_hummock_sdk = { workspace = true } risingwave_object_store = { workspace = true } @@ -54,6 +55,13 @@ risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } risingwave_sqlparser = { workspace = true } scopeguard = "1.2.0" +sea-orm = { version = "0.12.0", features = [ + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "runtime-tokio-native-tls", + "macros", +] } serde = { version = "1", features = ["derive"] } serde_json = "1" sync-point = { path = "../utils/sync-point" } @@ -84,8 +92,6 @@ assert_matches = "1" maplit = "1.0.2" rand = "0.8" risingwave_test_runner = { workspace = true } -static_assertions = "1" -tempfile = "3" [features] test = [] diff --git a/src/meta/README.md b/src/meta/README.md new file mode 100644 index 0000000000000..3782d765532a4 --- /dev/null +++ b/src/meta/README.md @@ -0,0 +1,9 @@ +## Organization of the meta crates + +We split the meta module into smaller crates in order to speed up compilation. + +- `meta/node` is the final meta node server +- `meta/service` is tonic grpc service implementations. We may further split this into parallel sub-crates. +- The remaining part `meta/src` is the implementation details imported by services. In the future, we can also try to re-organize this into smaller units. + +Refer to [#12924](https://github.com/risingwavelabs/risingwave/pull/12924) for more details. 
diff --git a/src/meta/node/Cargo.toml b/src/meta/node/Cargo.toml new file mode 100644 index 0000000000000..8c2a5aeadbe41 --- /dev/null +++ b/src/meta/node/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "risingwave_meta_node" +version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +keywords = { workspace = true } +license = { workspace = true } +repository = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["workspace-hack"] + +[dependencies] +anyhow = "1" +clap = { version = "4", features = ["derive", "env"] } +either = "1" +etcd-client = { workspace = true } +futures = { version = "0.3", default-features = false, features = ["alloc"] } +itertools = "0.11" +model_migration = { path = "../src/model_v2/migration" } +prometheus-http-query = "0.7" +regex = "1" +risingwave_common = { workspace = true } +risingwave_common_heap_profiling = { workspace = true } +risingwave_common_service = { workspace = true } +risingwave_meta = { workspace = true } +risingwave_meta_service = { workspace = true } +risingwave_pb = { workspace = true } +risingwave_rpc_client = { workspace = true } +sea-orm = { version = "0.12.0", features = [ + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "runtime-tokio-native-tls", + "macros", +] } +tokio = { version = "0.2", package = "madsim-tokio", features = [ + "rt", + "rt-multi-thread", + "sync", + "macros", + "time", + "signal", +] } +tonic = { workspace = true } +tracing = "0.1" + +[target.'cfg(not(madsim))'.dependencies] +workspace-hack = { path = "../../workspace-hack" } + +[dev-dependencies] + +[lints] +workspace = true diff --git a/src/meta/node/src/lib.rs b/src/meta/node/src/lib.rs new file mode 100644 index 0000000000000..bf1bddad2070f --- /dev/null +++ b/src/meta/node/src/lib.rs @@ -0,0 +1,344 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![feature(lint_reasons)] +#![feature(let_chains)] +#![cfg_attr(coverage, feature(coverage_attribute))] + +mod server; +use std::time::Duration; + +use clap::Parser; +pub use error::{MetaError, MetaResult}; +use risingwave_common::config::OverrideConfig; +use risingwave_common::util::resource_util; +use risingwave_common::{GIT_SHA, RW_VERSION}; +use risingwave_common_heap_profiling::HeapProfiler; +use risingwave_meta::*; +use risingwave_meta_service::*; +pub use rpc::{ElectionClient, ElectionMember, EtcdElectionClient}; +use server::{rpc_serve, MetaStoreSqlBackend}; + +use crate::manager::MetaOpts; + +#[derive(Debug, Clone, Parser, OverrideConfig)] +#[command(version, about = "The central metadata management service")] +pub struct MetaNodeOpts { + #[clap(long, env = "RW_VPC_ID")] + vpc_id: Option, + + #[clap(long, env = "RW_VPC_SECURITY_GROUP_ID")] + security_group_id: Option, + + #[clap(long, env = "RW_LISTEN_ADDR", default_value = "127.0.0.1:5690")] + listen_addr: String, + + /// The address for contacting this instance of the service. + /// This would be synonymous with the service's "public address" + /// or "identifying address". + /// It will serve as a unique identifier in cluster + /// membership and leader election. Must be specified for etcd backend. 
+ #[clap(long, env = "RW_ADVERTISE_ADDR")] + advertise_addr: String, + + #[clap(long, env = "RW_DASHBOARD_HOST")] + dashboard_host: Option, + + #[clap(long, env = "RW_PROMETHEUS_HOST")] + prometheus_host: Option, + + #[clap(long, env = "RW_ETCD_ENDPOINTS", default_value_t = String::from(""))] + etcd_endpoints: String, + + /// Enable authentication with etcd. By default disabled. + #[clap(long, env = "RW_ETCD_AUTH")] + etcd_auth: bool, + + /// Username of etcd, required when --etcd-auth is enabled. + #[clap(long, env = "RW_ETCD_USERNAME", default_value = "")] + etcd_username: String, + + /// Password of etcd, required when --etcd-auth is enabled. + #[clap(long, env = "RW_ETCD_PASSWORD", default_value = "")] + etcd_password: String, + + /// Endpoint of the SQL service, make it non-option when SQL service is required. + #[clap(long, env = "RW_SQL_ENDPOINT")] + sql_endpoint: Option, + + #[clap(long, env = "RW_DASHBOARD_UI_PATH")] + dashboard_ui_path: Option, + + /// For dashboard service to fetch cluster info. + #[clap(long, env = "RW_PROMETHEUS_ENDPOINT")] + prometheus_endpoint: Option, + + /// Endpoint of the connector node, there will be a sidecar connector node + /// colocated with Meta node in the cloud environment + #[clap(long, env = "RW_CONNECTOR_RPC_ENDPOINT")] + pub connector_rpc_endpoint: Option, + + /// Default tag for the endpoint created when creating a privatelink connection. + /// Will be appended to the tags specified in the `tags` field in with clause in `create + /// connection`. + #[clap(long, env = "RW_PRIVATELINK_ENDPOINT_DEFAULT_TAGS")] + pub privatelink_endpoint_default_tags: Option, + + /// The path of `risingwave.toml` configuration file. + /// + /// If empty, default configuration values will be used. + #[clap(long, env = "RW_CONFIG_PATH", default_value = "")] + pub config_path: String, + + #[clap(long, env = "RW_BACKEND", value_enum)] + #[override_opts(path = meta.backend)] + backend: Option, + + /// The interval of periodic barrier. 
+ #[clap(long, env = "RW_BARRIER_INTERVAL_MS")] + #[override_opts(path = system.barrier_interval_ms)] + barrier_interval_ms: Option, + + /// Target size of the Sstable. + #[clap(long, env = "RW_SSTABLE_SIZE_MB")] + #[override_opts(path = system.sstable_size_mb)] + sstable_size_mb: Option, + + /// Size of each block in bytes in SST. + #[clap(long, env = "RW_BLOCK_SIZE_KB")] + #[override_opts(path = system.block_size_kb)] + block_size_kb: Option, + + /// False positive probability of bloom filter. + #[clap(long, env = "RW_BLOOM_FALSE_POSITIVE")] + #[override_opts(path = system.bloom_false_positive)] + bloom_false_positive: Option, + + /// State store url + #[clap(long, env = "RW_STATE_STORE")] + #[override_opts(path = system.state_store)] + state_store: Option, + + /// Remote directory for storing data and metadata objects. + #[clap(long, env = "RW_DATA_DIRECTORY")] + #[override_opts(path = system.data_directory)] + data_directory: Option, + + /// Whether config object storage bucket lifecycle to purge stale data. + #[clap(long, env = "RW_DO_NOT_CONFIG_BUCKET_LIFECYCLE")] + #[override_opts(path = meta.do_not_config_object_storage_lifecycle)] + do_not_config_object_storage_lifecycle: Option, + + /// Remote storage url for storing snapshots. + #[clap(long, env = "RW_BACKUP_STORAGE_URL")] + #[override_opts(path = system.backup_storage_url)] + backup_storage_url: Option, + + /// Remote directory for storing snapshots. 
+ #[clap(long, env = "RW_BACKUP_STORAGE_DIRECTORY")] + #[override_opts(path = system.backup_storage_directory)] + backup_storage_directory: Option, + + #[clap(long, env = "RW_OBJECT_STORE_STREAMING_READ_TIMEOUT_MS", value_enum)] + #[override_opts(path = storage.object_store_streaming_read_timeout_ms)] + pub object_store_streaming_read_timeout_ms: Option, + #[clap(long, env = "RW_OBJECT_STORE_STREAMING_UPLOAD_TIMEOUT_MS", value_enum)] + #[override_opts(path = storage.object_store_streaming_upload_timeout_ms)] + pub object_store_streaming_upload_timeout_ms: Option, + #[clap(long, env = "RW_OBJECT_STORE_UPLOAD_TIMEOUT_MS", value_enum)] + #[override_opts(path = storage.object_store_upload_timeout_ms)] + pub object_store_upload_timeout_ms: Option, + #[clap(long, env = "RW_OBJECT_STORE_READ_TIMEOUT_MS", value_enum)] + #[override_opts(path = storage.object_store_read_timeout_ms)] + pub object_store_read_timeout_ms: Option, + + /// Enable heap profile dump when memory usage is high. + #[clap(long, env = "RW_HEAP_PROFILING_DIR")] + #[override_opts(path = server.heap_profiling.dir)] + pub heap_profiling_dir: Option, +} + +use std::future::Future; +use std::pin::Pin; + +use risingwave_common::config::{load_config, MetaBackend, RwConfig}; +use tracing::info; + +/// Start meta node +pub fn start(opts: MetaNodeOpts) -> Pin + Send>> { + // WARNING: don't change the function signature. Making it `async fn` will cause + // slow compile in release mode. 
+ Box::pin(async move { + info!("Starting meta node"); + info!("> options: {:?}", opts); + let config = load_config(&opts.config_path, &opts); + info!("> config: {:?}", config); + info!("> version: {} ({})", RW_VERSION, GIT_SHA); + let listen_addr = opts.listen_addr.parse().unwrap(); + let dashboard_addr = opts.dashboard_host.map(|x| x.parse().unwrap()); + let prometheus_addr = opts.prometheus_host.map(|x| x.parse().unwrap()); + let backend = match config.meta.backend { + MetaBackend::Etcd => MetaStoreBackend::Etcd { + endpoints: opts + .etcd_endpoints + .split(',') + .map(|x| x.to_string()) + .collect(), + credentials: match opts.etcd_auth { + true => Some((opts.etcd_username, opts.etcd_password)), + false => None, + }, + }, + MetaBackend::Mem => MetaStoreBackend::Mem, + }; + let sql_backend = opts + .sql_endpoint + .map(|endpoint| MetaStoreSqlBackend { endpoint }); + + validate_config(&config); + + let total_memory_bytes = resource_util::memory::system_memory_available_bytes(); + let heap_profiler = + HeapProfiler::new(total_memory_bytes, config.server.heap_profiling.clone()); + // Run a background heap profiler + heap_profiler.start(); + + let max_heartbeat_interval = + Duration::from_secs(config.meta.max_heartbeat_interval_secs as u64); + let max_idle_ms = config.meta.dangerous_max_idle_secs.unwrap_or(0) * 1000; + let in_flight_barrier_nums = config.streaming.in_flight_barrier_nums; + let privatelink_endpoint_default_tags = + opts.privatelink_endpoint_default_tags.map(|tags| { + tags.split(',') + .map(|s| { + let key_val = s.split_once('=').unwrap(); + (key_val.0.to_string(), key_val.1.to_string()) + }) + .collect() + }); + + let add_info = AddressInfo { + advertise_addr: opts.advertise_addr, + listen_addr, + prometheus_addr, + dashboard_addr, + ui_path: opts.dashboard_ui_path, + }; + + let (mut join_handle, leader_lost_handle, shutdown_send) = rpc_serve( + add_info, + backend, + sql_backend, + max_heartbeat_interval, + config.meta.meta_leader_lease_secs, + 
MetaOpts { + enable_recovery: !config.meta.disable_recovery, + in_flight_barrier_nums, + max_idle_ms, + compaction_deterministic_test: config.meta.enable_compaction_deterministic, + default_parallelism: config.meta.default_parallelism, + vacuum_interval_sec: config.meta.vacuum_interval_sec, + vacuum_spin_interval_ms: config.meta.vacuum_spin_interval_ms, + hummock_version_checkpoint_interval_sec: config + .meta + .hummock_version_checkpoint_interval_sec, + min_delta_log_num_for_hummock_version_checkpoint: config + .meta + .min_delta_log_num_for_hummock_version_checkpoint, + min_sst_retention_time_sec: config.meta.min_sst_retention_time_sec, + full_gc_interval_sec: config.meta.full_gc_interval_sec, + collect_gc_watermark_spin_interval_sec: config + .meta + .collect_gc_watermark_spin_interval_sec, + enable_committed_sst_sanity_check: config.meta.enable_committed_sst_sanity_check, + periodic_compaction_interval_sec: config.meta.periodic_compaction_interval_sec, + node_num_monitor_interval_sec: config.meta.node_num_monitor_interval_sec, + prometheus_endpoint: opts.prometheus_endpoint, + vpc_id: opts.vpc_id, + security_group_id: opts.security_group_id, + connector_rpc_endpoint: opts.connector_rpc_endpoint, + privatelink_endpoint_default_tags, + periodic_space_reclaim_compaction_interval_sec: config + .meta + .periodic_space_reclaim_compaction_interval_sec, + telemetry_enabled: config.server.telemetry_enabled, + periodic_ttl_reclaim_compaction_interval_sec: config + .meta + .periodic_ttl_reclaim_compaction_interval_sec, + periodic_tombstone_reclaim_compaction_interval_sec: config + .meta + .periodic_tombstone_reclaim_compaction_interval_sec, + periodic_split_compact_group_interval_sec: config + .meta + .periodic_split_compact_group_interval_sec, + split_group_size_limit: config.meta.split_group_size_limit, + min_table_split_size: config.meta.move_table_size_limit, + table_write_throughput_threshold: config.meta.table_write_throughput_threshold, + 
min_table_split_write_throughput: config.meta.min_table_split_write_throughput, + partition_vnode_count: config.meta.partition_vnode_count, + do_not_config_object_storage_lifecycle: config + .meta + .do_not_config_object_storage_lifecycle, + compaction_task_max_heartbeat_interval_secs: config + .meta + .compaction_task_max_heartbeat_interval_secs, + compaction_config: Some(config.meta.compaction_config), + }, + config.system.into_init_system_params(), + ) + .await + .unwrap(); + + tracing::info!("Meta server listening at {}", listen_addr); + + match leader_lost_handle { + None => { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + tracing::info!("receive ctrl+c"); + shutdown_send.send(()).unwrap(); + join_handle.await.unwrap() + } + res = &mut join_handle => res.unwrap(), + }; + } + Some(mut handle) => { + tokio::select! { + _ = &mut handle => { + tracing::info!("receive leader lost signal"); + // When we lose leadership, we will exit as soon as possible. + } + _ = tokio::signal::ctrl_c() => { + tracing::info!("receive ctrl+c"); + shutdown_send.send(()).unwrap(); + join_handle.await.unwrap(); + handle.abort(); + } + res = &mut join_handle => { + res.unwrap(); + handle.abort(); + }, + }; + } + }; + }) +} + +fn validate_config(config: &RwConfig) { + if config.meta.meta_leader_lease_secs <= 2 { + let error_msg = "meta leader lease secs should be larger than 2"; + tracing::error!(error_msg); + panic!("{}", error_msg); + } +} diff --git a/src/meta/src/rpc/server.rs b/src/meta/node/src/server.rs similarity index 80% rename from src/meta/src/rpc/server.rs rename to src/meta/node/src/server.rs index 0f1b06a0dd702..d922f1c37e033 100644 --- a/src/meta/src/rpc/server.rs +++ b/src/meta/node/src/server.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -20,14 +19,31 @@ use either::Either; use etcd_client::ConnectOptions; use futures::future::join_all; use itertools::Itertools; +use model_migration::{Migrator, MigratorTrait}; use regex::Regex; use risingwave_common::monitor::connection::{RouterExt, TcpConfig}; use risingwave_common::telemetry::manager::TelemetryManager; use risingwave_common::telemetry::telemetry_env_enabled; use risingwave_common_service::metrics_manager::MetricsManager; use risingwave_common_service::tracing::TracingExtractLayer; -use risingwave_object_store::object::object_metrics::ObjectStoreMetrics; -use risingwave_object_store::object::parse_remote_object_store; +use risingwave_meta::rpc::intercept::MetricsMiddlewareLayer; +use risingwave_meta::rpc::ElectionClientRef; +use risingwave_meta_service::backup_service::BackupServiceImpl; +use risingwave_meta_service::cloud_service::CloudServiceImpl; +use risingwave_meta_service::cluster_service::ClusterServiceImpl; +use risingwave_meta_service::ddl_service::DdlServiceImpl; +use risingwave_meta_service::health_service::HealthServiceImpl; +use risingwave_meta_service::heartbeat_service::HeartbeatServiceImpl; +use risingwave_meta_service::hummock_service::HummockServiceImpl; +use risingwave_meta_service::meta_member_service::MetaMemberServiceImpl; +use risingwave_meta_service::notification_service::NotificationServiceImpl; +use risingwave_meta_service::scale_service::ScaleServiceImpl; +use risingwave_meta_service::serving_service::ServingServiceImpl; +use risingwave_meta_service::sink_coordination_service::SinkCoordinationServiceImpl; +use risingwave_meta_service::stream_service::StreamServiceImpl; +use risingwave_meta_service::system_params_service::SystemParamsServiceImpl; +use risingwave_meta_service::telemetry_service::TelemetryInfoServiceImpl; +use risingwave_meta_service::user_service::UserServiceImpl; use 
risingwave_pb::backup_service::backup_service_server::BackupServiceServer; use risingwave_pb::cloud_service::cloud_service_server::CloudServiceServer; use risingwave_pb::connector_service::sink_coordination_service_server::SinkCoordinationServiceServer; @@ -46,19 +62,16 @@ use risingwave_pb::meta::telemetry_info_service_server::TelemetryInfoServiceServ use risingwave_pb::meta::SystemParams; use risingwave_pb::user::user_service_server::UserServiceServer; use risingwave_rpc_client::ComputeClientPool; +use sea_orm::{ConnectionTrait, DbBackend}; use tokio::sync::oneshot::{channel as OneChannel, Receiver as OneReceiver}; use tokio::sync::watch; use tokio::sync::watch::{Receiver as WatchReceiver, Sender as WatchSender}; use tokio::task::JoinHandle; -use super::intercept::MetricsMiddlewareLayer; -use super::service::health_service::HealthServiceImpl; -use super::service::notification_service::NotificationServiceImpl; -use super::service::scale_service::ScaleServiceImpl; -use super::service::serving_service::ServingServiceImpl; -use super::DdlServiceImpl; use crate::backup_restore::BackupManager; use crate::barrier::{BarrierScheduler, GlobalBarrierManager}; +use crate::controller::system_param::SystemParamsController; +use crate::controller::SqlMetaStore; use crate::hummock::HummockManager; use crate::manager::sink_coordination::SinkCoordinatorManager; use crate::manager::{ @@ -66,21 +79,13 @@ use crate::manager::{ SystemParamsManager, }; use crate::rpc::cloud_provider::AwsEc2Client; -use crate::rpc::election_client::{ElectionClient, EtcdElectionClient}; +use crate::rpc::election::etcd::EtcdElectionClient; +use crate::rpc::election::sql::{ + MySqlDriver, PostgresDriver, SqlBackendElectionClient, SqliteDriver, +}; use crate::rpc::metrics::{ start_fragment_info_monitor, start_worker_info_monitor, GLOBAL_META_METRICS, }; -use crate::rpc::service::backup_service::BackupServiceImpl; -use crate::rpc::service::cloud_service::CloudServiceImpl; -use 
crate::rpc::service::cluster_service::ClusterServiceImpl; -use crate::rpc::service::heartbeat_service::HeartbeatServiceImpl; -use crate::rpc::service::hummock_service::HummockServiceImpl; -use crate::rpc::service::meta_member_service::MetaMemberServiceImpl; -use crate::rpc::service::sink_coordination_service::SinkCoordinationServiceImpl; -use crate::rpc::service::stream_service::StreamServiceImpl; -use crate::rpc::service::system_params_service::SystemParamsServiceImpl; -use crate::rpc::service::telemetry_service::TelemetryInfoServiceImpl; -use crate::rpc::service::user_service::UserServiceImpl; use crate::serving::ServingVnodeMapping; use crate::storage::{ EtcdMetaStore, MemStore, MetaStore, MetaStoreBoxExt, MetaStoreRef, @@ -89,47 +94,56 @@ use crate::storage::{ use crate::stream::{GlobalStreamManager, SourceManager}; use crate::telemetry::{MetaReportCreator, MetaTelemetryInfoFetcher}; use crate::{hummock, serving, MetaError, MetaResult}; - #[derive(Debug)] -pub enum MetaStoreBackend { - Etcd { - endpoints: Vec, - credentials: Option<(String, String)>, - }, - Mem, -} - -#[derive(Clone)] -pub struct AddressInfo { - pub advertise_addr: String, - pub listen_addr: SocketAddr, - pub prometheus_addr: Option, - pub dashboard_addr: Option, - pub ui_path: Option, -} - -impl Default for AddressInfo { - fn default() -> Self { - Self { - advertise_addr: "".to_string(), - listen_addr: SocketAddr::V4("127.0.0.1:0000".parse().unwrap()), - prometheus_addr: None, - dashboard_addr: None, - ui_path: None, - } - } +pub struct MetaStoreSqlBackend { + pub(crate) endpoint: String, } -pub type ElectionClientRef = Arc; +use risingwave_meta::MetaStoreBackend; +use risingwave_meta_service::AddressInfo; pub async fn rpc_serve( address_info: AddressInfo, meta_store_backend: MetaStoreBackend, + meta_store_sql_backend: Option, max_cluster_heartbeat_interval: Duration, lease_interval_secs: u64, opts: MetaOpts, init_system_params: SystemParams, ) -> MetaResult<(JoinHandle<()>, Option>, 
WatchSender<()>)> { + let meta_store_sql = match meta_store_sql_backend { + Some(backend) => { + let mut options = sea_orm::ConnectOptions::new(backend.endpoint); + options + .max_connections(20) + .connect_timeout(Duration::from_secs(10)) + .idle_timeout(Duration::from_secs(30)); + let conn = sea_orm::Database::connect(options).await?; + Some(SqlMetaStore::new(conn)) + } + None => None, + }; + + let mut election_client = if let Some(sql_store) = &meta_store_sql { + let id = address_info.advertise_addr.clone(); + let conn = sql_store.conn.clone(); + let election_client: ElectionClientRef = match conn.get_database_backend() { + DbBackend::Sqlite => { + Arc::new(SqlBackendElectionClient::new(id, SqliteDriver::new(conn))) + } + DbBackend::Postgres => { + Arc::new(SqlBackendElectionClient::new(id, PostgresDriver::new(conn))) + } + DbBackend::MySql => Arc::new(SqlBackendElectionClient::new(id, MySqlDriver::new(conn))), + }; + + election_client.init().await?; + + Some(election_client) + } else { + None + }; + match meta_store_backend { MetaStoreBackend::Etcd { endpoints, @@ -147,25 +161,28 @@ pub async fn rpc_serve( .map_err(|e| anyhow::anyhow!("failed to connect etcd {}", e))?; let meta_store = EtcdMetaStore::new(client).into_ref(); - // `with_keep_alive` option will break the long connection in election client. - let mut election_options = ConnectOptions::default(); - if let Some((username, password)) = &credentials { - election_options = election_options.with_user(username, password) - } + if election_client.is_none() { + // `with_keep_alive` option will break the long connection in election client. 
+ let mut election_options = ConnectOptions::default(); + if let Some((username, password)) = &credentials { + election_options = election_options.with_user(username, password) + } - let election_client = Arc::new( - EtcdElectionClient::new( - endpoints, - Some(election_options), - auth_enabled, - address_info.advertise_addr.clone(), - ) - .await?, - ); + election_client = Some(Arc::new( + EtcdElectionClient::new( + endpoints, + Some(election_options), + auth_enabled, + address_info.advertise_addr.clone(), + ) + .await?, + )); + } rpc_serve_with_store( meta_store, - Some(election_client), + election_client, + meta_store_sql, address_info, max_cluster_heartbeat_interval, lease_interval_secs, @@ -177,7 +194,8 @@ pub async fn rpc_serve( let meta_store = MemStore::new().into_ref(); rpc_serve_with_store( meta_store, - None, + election_client, + meta_store_sql, address_info, max_cluster_heartbeat_interval, lease_interval_secs, @@ -192,6 +210,7 @@ pub async fn rpc_serve( pub fn rpc_serve_with_store( meta_store: MetaStoreRef, election_client: Option, + meta_store_sql: Option, address_info: AddressInfo, max_cluster_heartbeat_interval: Duration, lease_interval_secs: u64, @@ -274,6 +293,7 @@ pub fn rpc_serve_with_store( start_service_as_election_leader( meta_store, + meta_store_sql, address_info, max_cluster_heartbeat_interval, opts, @@ -345,6 +365,7 @@ pub async fn start_service_as_election_follower( /// Returns an error if the service initialization failed pub async fn start_service_as_election_leader( meta_store: MetaStoreRef, + meta_store_sql: Option, address_info: AddressInfo, max_cluster_heartbeat_interval: Duration, opts: MetaOpts, @@ -353,12 +374,31 @@ pub async fn start_service_as_election_leader( mut svc_shutdown_rx: WatchReceiver<()>, ) -> MetaResult<()> { tracing::info!("Defining leader services"); + if let Some(sql_store) = &meta_store_sql { + // Try to upgrade if any new model changes are added. 
+ Migrator::up(&sql_store.conn, None) + .await + .expect("Failed to upgrade models in meta store"); + } + let prometheus_endpoint = opts.prometheus_endpoint.clone(); - let env = MetaSrvEnv::new(opts, init_system_params, meta_store.clone()).await?; + let env = MetaSrvEnv::new( + opts.clone(), + init_system_params, + meta_store.clone(), + meta_store_sql.clone(), + ) + .await?; let fragment_manager = Arc::new(FragmentManager::new(env.clone()).await.unwrap()); let system_params_manager = env.system_params_manager_ref(); - let system_params_reader = system_params_manager.get_params().await; + let mut system_params_reader = system_params_manager.get_params().await; + + // Using new reader instead if the controller is set. + let system_params_controller = env.system_params_controller_ref(); + if let Some(ctl) = &system_params_controller { + system_params_reader = ctl.get_params().await; + } let data_directory = system_params_reader.data_directory(); if !is_correct_data_directory(data_directory) { @@ -428,8 +468,9 @@ pub async fn start_service_as_election_leader( fragment_manager: fragment_manager.clone(), compute_clients: ComputeClientPool::default(), meta_store: env.meta_store_ref(), + ui_path: address_info.ui_path, }; - let task = tokio::spawn(dashboard_service.serve(address_info.ui_path)); + let task = tokio::spawn(dashboard_service.serve()); Some(task) } else { None @@ -453,8 +494,7 @@ pub async fn start_service_as_election_leader( .unwrap(), ); - let (sink_manager, shutdown_handle) = - SinkCoordinatorManager::start_worker(env.connector_client()); + let (sink_manager, shutdown_handle) = SinkCoordinatorManager::start_worker(); let mut sub_tasks = vec![shutdown_handle]; let barrier_manager = Arc::new(GlobalBarrierManager::new( @@ -514,15 +554,6 @@ pub async fn start_service_as_election_leader( backup_manager.clone(), compactor_manager.clone(), )); - // Just validates the URL is valid. Panics if not. 
- let _wasm_object_store = Arc::new( - parse_remote_object_store( - system_params_reader.wasm_storage_url(), - Arc::new(ObjectStoreMetrics::unused()), - "Wasm Engine", - ) - .await, - ); let mut aws_cli = None; if let Some(my_vpc_id) = &env.opts.vpc_id @@ -581,8 +612,11 @@ pub async fn start_service_as_election_leader( ); let health_srv = HealthServiceImpl::new(); let backup_srv = BackupServiceImpl::new(backup_manager); - let telemetry_srv = TelemetryInfoServiceImpl::new(meta_store.clone()); - let system_params_srv = SystemParamsServiceImpl::new(system_params_manager.clone()); + let telemetry_srv = TelemetryInfoServiceImpl::new(meta_store.clone(), env.sql_meta_store()); + let system_params_srv = SystemParamsServiceImpl::new( + system_params_manager.clone(), + system_params_controller.clone(), + ); let serving_srv = ServingServiceImpl::new(serving_vnode_mapping.clone(), fragment_manager.clone()); let cloud_srv = CloudServiceImpl::new(catalog_manager.clone(), aws_cli); @@ -598,28 +632,28 @@ pub async fn start_service_as_election_leader( // compaction_scheduler, &env.opts, )); - sub_tasks.push( - start_worker_info_monitor( - cluster_manager.clone(), - election_client.clone(), - Duration::from_secs(env.opts.node_num_monitor_interval_sec), - meta_metrics.clone(), - ) - .await, - ); - sub_tasks.push( - start_fragment_info_monitor( - cluster_manager.clone(), - catalog_manager, - fragment_manager.clone(), - hummock_manager.clone(), - meta_metrics.clone(), - ) - .await, - ); - sub_tasks.push(SystemParamsManager::start_params_notifier( - system_params_manager.clone(), + sub_tasks.push(start_worker_info_monitor( + cluster_manager.clone(), + election_client.clone(), + Duration::from_secs(env.opts.node_num_monitor_interval_sec), + meta_metrics.clone(), + )); + sub_tasks.push(start_fragment_info_monitor( + cluster_manager.clone(), + catalog_manager, + fragment_manager.clone(), + hummock_manager.clone(), + meta_metrics.clone(), )); + if let Some(system_params_ctl) = 
system_params_controller { + sub_tasks.push(SystemParamsController::start_params_notifier( + system_params_ctl, + )); + } else { + sub_tasks.push(SystemParamsManager::start_params_notifier( + system_params_manager.clone(), + )); + } sub_tasks.push(HummockManager::hummock_timer_task(hummock_manager.clone())); sub_tasks.push(HummockManager::compaction_event_loop( hummock_manager, @@ -702,8 +736,10 @@ pub async fn start_service_as_election_leader( // Persist params before starting services so that invalid params that cause meta node // to crash will not be persisted. - system_params_manager.flush_params().await?; - env.cluster_id().put_at_meta_store(&meta_store).await?; + if meta_store_sql.is_none() { + system_params_manager.flush_params().await?; + env.cluster_id().put_at_meta_store(&meta_store).await?; + } tracing::info!("Assigned cluster id {:?}", *env.cluster_id()); tracing::info!("Starting meta services"); diff --git a/src/meta/service/Cargo.toml b/src/meta/service/Cargo.toml new file mode 100644 index 0000000000000..1760ccd56a85a --- /dev/null +++ b/src/meta/service/Cargo.toml @@ -0,0 +1,53 @@ +[package] +name = "risingwave_meta_service" +version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +keywords = { workspace = true } +license = { workspace = true } +repository = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["workspace-hack"] + +[dependencies] +anyhow = "1" +async-trait = "0.1" +either = "1" +futures = { version = "0.3", default-features = false, features = ["alloc"] } +itertools = "0.11" +regex = "1" +risingwave_common = { workspace = true } +risingwave_connector = { workspace = true } +risingwave_meta = { workspace = true } +risingwave_pb = { workspace = true } +sea-orm = { version = "0.12.0", features = [ + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "runtime-tokio-native-tls", + "macros", +] } +sync-point = { 
path = "../../utils/sync-point" } +tokio = { version = "0.2", package = "madsim-tokio", features = [ + "rt", + "rt-multi-thread", + "sync", + "macros", + "time", + "signal", +] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { workspace = true } +tracing = "0.1" + +[target.'cfg(not(madsim))'.dependencies] +workspace-hack = { path = "../../workspace-hack" } + +[dev-dependencies] + +[lints] +workspace = true diff --git a/src/meta/src/rpc/service/backup_service.rs b/src/meta/service/src/backup_service.rs similarity index 94% rename from src/meta/src/rpc/service/backup_service.rs rename to src/meta/service/src/backup_service.rs index 22897d8bb770e..d83b4d0d1e8e4 100644 --- a/src/meta/src/rpc/service/backup_service.rs +++ b/src/meta/service/src/backup_service.rs @@ -49,10 +49,11 @@ impl BackupService for BackupServiceImpl { request: Request, ) -> Result, Status> { let job_id = request.into_inner().job_id; - let job_status = self.backup_manager.get_backup_job_status(job_id).await? 
as _; + let (job_status, message) = self.backup_manager.get_backup_job_status(job_id); Ok(Response::new(GetBackupJobStatusResponse { job_id, - job_status, + job_status: job_status as _, + message, })) } diff --git a/src/meta/src/rpc/service/cloud_service.rs b/src/meta/service/src/cloud_service.rs similarity index 78% rename from src/meta/src/rpc/service/cloud_service.rs rename to src/meta/service/src/cloud_service.rs index 141abd9a0aab9..21cd77ff42fae 100644 --- a/src/meta/src/rpc/service/cloud_service.rs +++ b/src/meta/service/src/cloud_service.rs @@ -17,9 +17,10 @@ use std::sync::LazyLock; use async_trait::async_trait; use regex::Regex; +use risingwave_connector::dispatch_source_prop; use risingwave_connector::source::kafka::private_link::insert_privatelink_broker_rewrite_map; use risingwave_connector::source::{ - ConnectorProperties, SourceEnumeratorContext, SplitEnumeratorImpl, + ConnectorProperties, SourceEnumeratorContext, SourceProperties, SplitEnumerator, }; use risingwave_pb::catalog::connection::Info::PrivateLinkService; use risingwave_pb::cloud_service::cloud_service_server::CloudService; @@ -116,7 +117,9 @@ impl CloudService for CloudServiceImpl { } _ => (), }; - if let Err(e) = insert_privatelink_broker_rewrite_map(&service, &mut source_cfg) { + if let Err(e) = + insert_privatelink_broker_rewrite_map(&mut source_cfg, Some(&service), None) + { return Ok(new_rwc_validate_fail_response( ErrorType::PrivatelinkResolveErr, e.to_string(), @@ -139,36 +142,43 @@ impl CloudService for CloudServiceImpl { e.to_string(), )); }; - let enumerator = - SplitEnumeratorImpl::create(props.unwrap(), SourceEnumeratorContext::default().into()) - .await; - if let Err(e) = enumerator { - return Ok(new_rwc_validate_fail_response( - ErrorType::KafkaInvalidProperties, - e.to_string(), - )); + + async fn new_enumerator( + props: P, + ) -> Result { + P::SplitEnumerator::new(props, SourceEnumeratorContext::default().into()).await } - if let Err(e) = 
enumerator.unwrap().list_splits().await { - let error_message = e.to_string(); - if error_message.contains("BrokerTransportFailure") { + + dispatch_source_prop!(props.unwrap(), props, { + let enumerator = new_enumerator(*props).await; + if let Err(e) = enumerator { return Ok(new_rwc_validate_fail_response( - ErrorType::KafkaBrokerUnreachable, + ErrorType::KafkaInvalidProperties, e.to_string(), )); } - static TOPIC_NOT_FOUND: LazyLock = - LazyLock::new(|| Regex::new(r"topic .* not found").unwrap()); - if TOPIC_NOT_FOUND.is_match(error_message.as_str()) { + if let Err(e) = enumerator.unwrap().list_splits().await { + let error_message = e.to_string(); + if error_message.contains("BrokerTransportFailure") { + return Ok(new_rwc_validate_fail_response( + ErrorType::KafkaBrokerUnreachable, + e.to_string(), + )); + } + static TOPIC_NOT_FOUND: LazyLock = + LazyLock::new(|| Regex::new(r"topic .* not found").unwrap()); + if TOPIC_NOT_FOUND.is_match(error_message.as_str()) { + return Ok(new_rwc_validate_fail_response( + ErrorType::KafkaTopicNotFound, + e.to_string(), + )); + } return Ok(new_rwc_validate_fail_response( - ErrorType::KafkaTopicNotFound, + ErrorType::KafkaOther, e.to_string(), )); } - return Ok(new_rwc_validate_fail_response( - ErrorType::KafkaOther, - e.to_string(), - )); - } + }); Ok(Response::new(RwCloudValidateSourceResponse { ok: true, error: None, diff --git a/src/meta/src/rpc/service/cluster_service.rs b/src/meta/service/src/cluster_service.rs similarity index 100% rename from src/meta/src/rpc/service/cluster_service.rs rename to src/meta/service/src/cluster_service.rs diff --git a/src/meta/src/rpc/service/ddl_service.rs b/src/meta/service/src/ddl_service.rs similarity index 89% rename from src/meta/src/rpc/service/ddl_service.rs rename to src/meta/service/src/ddl_service.rs index a744ba910198d..061ff93589163 100644 --- a/src/meta/src/rpc/service/ddl_service.rs +++ b/src/meta/service/src/ddl_service.rs @@ -25,11 +25,12 @@ use 
risingwave_pb::catalog::connection::private_link_service::{ use risingwave_pb::catalog::connection::PbPrivateLinkService; use risingwave_pb::catalog::source::OptionalAssociatedTableId; use risingwave_pb::catalog::table::OptionalAssociatedSourceId; -use risingwave_pb::catalog::{connection, Connection}; +use risingwave_pb::catalog::{connection, Connection, CreateType, PbSource, PbTable}; use risingwave_pb::ddl_service::ddl_service_server::DdlService; use risingwave_pb::ddl_service::drop_table_request::PbSourceId; use risingwave_pb::ddl_service::*; use risingwave_pb::stream_plan::stream_node::NodeBody; +use risingwave_pb::stream_plan::PbStreamFragmentGraph; use tonic::{Request, Response, Status}; use crate::barrier::BarrierManagerRef; @@ -228,7 +229,11 @@ impl DdlService for DdlServiceImpl { let version = self .ddl_controller - .run_command(DdlCommand::CreateStreamingJob(stream_job, fragment_graph)) + .run_command(DdlCommand::CreateStreamingJob( + stream_job, + fragment_graph, + CreateType::Foreground, + )) .await?; Ok(Response::new(CreateSinkResponse { @@ -271,6 +276,7 @@ impl DdlService for DdlServiceImpl { let req = request.into_inner(); let mview = req.get_materialized_view()?.clone(); + let create_type = mview.get_create_type().unwrap_or(CreateType::Foreground); let fragment_graph = req.get_fragment_graph()?.clone(); let mut stream_job = StreamingJob::MaterializedView(mview); @@ -279,7 +285,11 @@ impl DdlService for DdlServiceImpl { let version = self .ddl_controller - .run_command(DdlCommand::CreateStreamingJob(stream_job, fragment_graph)) + .run_command(DdlCommand::CreateStreamingJob( + stream_job, + fragment_graph, + create_type, + )) .await?; Ok(Response::new(CreateMaterializedViewResponse { @@ -330,7 +340,11 @@ impl DdlService for DdlServiceImpl { let version = self .ddl_controller - .run_command(DdlCommand::CreateStreamingJob(stream_job, fragment_graph)) + .run_command(DdlCommand::CreateStreamingJob( + stream_job, + fragment_graph, + CreateType::Foreground, 
+ )) .await?; Ok(Response::new(CreateIndexResponse { @@ -413,30 +427,7 @@ impl DdlService for DdlServiceImpl { if let Some(source) = &mut source { // Generate source id. let source_id = self.gen_unique_id::<{ IdCategory::Table }>().await?; // TODO: Use source category - source.id = source_id; - - let mut source_count = 0; - for fragment in fragment_graph.fragments.values_mut() { - visit_fragment(fragment, |node_body| { - if let NodeBody::Source(source_node) = node_body { - // TODO: Refactor using source id. - source_node.source_inner.as_mut().unwrap().source_id = source_id; - source_count += 1; - } - }); - } - assert_eq!( - source_count, 1, - "require exactly 1 external stream source when creating table with a connector" - ); - - // Fill in the correct table id for source. - source.optional_associated_table_id = - Some(OptionalAssociatedTableId::AssociatedTableId(table_id)); - - // Fill in the correct source id for mview. - mview.optional_associated_source_id = - Some(OptionalAssociatedSourceId::AssociatedSourceId(source_id)); + fill_table_source(source, source_id, &mut mview, table_id, &mut fragment_graph); } let mut stream_job = StreamingJob::Table(source, mview); @@ -445,7 +436,11 @@ impl DdlService for DdlServiceImpl { let version = self .ddl_controller - .run_command(DdlCommand::CreateStreamingJob(stream_job, fragment_graph)) + .run_command(DdlCommand::CreateStreamingJob( + stream_job, + fragment_graph, + CreateType::Foreground, + )) .await?; Ok(Response::new(CreateTableResponse { @@ -530,10 +525,19 @@ impl DdlService for DdlServiceImpl { ) -> Result, Status> { let req = request.into_inner(); - let stream_job = StreamingJob::Table(None, req.table.unwrap()); - let fragment_graph = req.fragment_graph.unwrap(); + let mut source = req.source; + let mut fragment_graph = req.fragment_graph.unwrap(); + let mut table = req.table.unwrap(); + if let Some(OptionalAssociatedSourceId::AssociatedSourceId(source_id)) = + table.optional_associated_source_id + { + let source = 
source.as_mut().unwrap(); + let table_id = table.id; + fill_table_source(source, source_id, &mut table, table_id, &mut fragment_graph); + } let table_col_index_mapping = ColIndexMapping::from_protobuf(&req.table_col_index_mapping.unwrap()); + let stream_job = StreamingJob::Table(source, table); let version = self .ddl_controller @@ -713,7 +717,7 @@ impl DdlService for DdlServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn get_tables( &self, request: Request, @@ -728,6 +732,11 @@ impl DdlService for DdlServiceImpl { } Ok(Response::new(GetTablesResponse { tables })) } + + async fn wait(&self, _request: Request) -> Result, Status> { + self.ddl_controller.wait().await; + Ok(Response::new(WaitResponse {})) + } } impl DdlServiceImpl { @@ -760,3 +769,37 @@ impl DdlServiceImpl { Ok(()) } } + +fn fill_table_source( + source: &mut PbSource, + source_id: u32, + table: &mut PbTable, + table_id: u32, + fragment_graph: &mut PbStreamFragmentGraph, +) { + // If we're creating a table with connector, we should additionally fill its ID first. + source.id = source_id; + + let mut source_count = 0; + for fragment in fragment_graph.fragments.values_mut() { + visit_fragment(fragment, |node_body| { + if let NodeBody::Source(source_node) = node_body { + // TODO: Refactor using source id. + source_node.source_inner.as_mut().unwrap().source_id = source_id; + source_count += 1; + } + }); + } + assert_eq!( + source_count, 1, + "require exactly 1 external stream source when creating table with a connector" + ); + + // Fill in the correct table id for source. + source.optional_associated_table_id = + Some(OptionalAssociatedTableId::AssociatedTableId(table_id)); + + // Fill in the correct source id for mview. 
+ table.optional_associated_source_id = + Some(OptionalAssociatedSourceId::AssociatedSourceId(source_id)); +} diff --git a/src/meta/src/rpc/service/health_service.rs b/src/meta/service/src/health_service.rs similarity index 93% rename from src/meta/src/rpc/service/health_service.rs rename to src/meta/service/src/health_service.rs index bdb01c1ef0760..338091a72de38 100644 --- a/src/meta/src/rpc/service/health_service.rs +++ b/src/meta/service/src/health_service.rs @@ -19,6 +19,12 @@ use tonic::{Request, Response, Status}; pub struct HealthServiceImpl {} +impl Default for HealthServiceImpl { + fn default() -> Self { + Self::new() + } +} + impl HealthServiceImpl { pub fn new() -> Self { Self {} diff --git a/src/meta/src/rpc/service/heartbeat_service.rs b/src/meta/service/src/heartbeat_service.rs similarity index 98% rename from src/meta/src/rpc/service/heartbeat_service.rs rename to src/meta/service/src/heartbeat_service.rs index 7c51b39346894..e31058ff2bdc5 100644 --- a/src/meta/src/rpc/service/heartbeat_service.rs +++ b/src/meta/service/src/heartbeat_service.rs @@ -32,7 +32,7 @@ impl HeartbeatServiceImpl { #[async_trait::async_trait] impl HeartbeatService for HeartbeatServiceImpl { - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn heartbeat( &self, request: Request, diff --git a/src/meta/src/rpc/service/hummock_service.rs b/src/meta/service/src/hummock_service.rs similarity index 80% rename from src/meta/src/rpc/service/hummock_service.rs rename to src/meta/service/src/hummock_service.rs index 58310cc6eb110..ebda1dd6e8be6 100644 --- a/src/meta/src/rpc/service/hummock_service.rs +++ b/src/meta/service/src/hummock_service.rs @@ -12,22 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::time::Duration; use futures::StreamExt; use itertools::Itertools; use risingwave_common::catalog::{TableId, NON_RESERVED_SYS_CATALOG_ID}; +use risingwave_pb::hummock::get_compaction_score_response::PickerInfo; use risingwave_pb::hummock::hummock_manager_service_server::HummockManagerService; use risingwave_pb::hummock::subscribe_compaction_event_request::Event as RequestEvent; use risingwave_pb::hummock::version_update_payload::Payload; use risingwave_pb::hummock::*; use tonic::{Request, Response, Status, Streaming}; -use crate::hummock::compaction::ManualCompactionOption; +use crate::hummock::compaction::selector::ManualCompactionOption; use crate::hummock::{HummockManagerRef, VacuumManagerRef}; use crate::manager::FragmentManagerRef; -use crate::rpc::service::RwReceiverStream; +use crate::RwReceiverStream; pub struct HummockServiceImpl { hummock_manager: HummockManagerRef, vacuum_manager: VacuumManagerRef, @@ -48,6 +49,18 @@ impl HummockServiceImpl { } } +macro_rules! 
fields_to_kvs { + ($struct:ident, $($field:ident),*) => { + { + let mut kvs = HashMap::default(); + $( + kvs.insert(stringify!($field).to_string(), $struct.$field.to_string()); + )* + kvs + } + } +} + #[async_trait::async_trait] impl HummockManagerService for HummockServiceImpl { type SubscribeCompactionEventStream = RwReceiverStream; @@ -513,4 +526,124 @@ impl HummockManagerService for HummockServiceImpl { Ok(Response::new(RwReceiverStream::new(rx))) } + + async fn report_compaction_task( + &self, + _request: Request, + ) -> Result, Status> { + unreachable!() + } + + async fn list_branched_object( + &self, + _request: Request, + ) -> Result, Status> { + let branched_objects = self + .hummock_manager + .list_branched_objects() + .await + .into_iter() + .flat_map(|(object_id, v)| { + v.into_iter() + .map(move |(compaction_group_id, sst_id)| BranchedObject { + object_id, + sst_id, + compaction_group_id, + }) + }) + .collect(); + Ok(Response::new(ListBranchedObjectResponse { + branched_objects, + })) + } + + async fn list_active_write_limit( + &self, + _request: Request, + ) -> Result, Status> { + Ok(Response::new(ListActiveWriteLimitResponse { + write_limits: self.hummock_manager.write_limits().await, + })) + } + + async fn list_hummock_meta_config( + &self, + _request: Request, + ) -> Result, Status> { + let opt = &self.hummock_manager.env.opts; + let configs = fields_to_kvs!( + opt, + vacuum_interval_sec, + vacuum_spin_interval_ms, + hummock_version_checkpoint_interval_sec, + min_delta_log_num_for_hummock_version_checkpoint, + min_sst_retention_time_sec, + full_gc_interval_sec, + collect_gc_watermark_spin_interval_sec, + periodic_compaction_interval_sec, + periodic_space_reclaim_compaction_interval_sec, + periodic_ttl_reclaim_compaction_interval_sec, + periodic_tombstone_reclaim_compaction_interval_sec, + periodic_split_compact_group_interval_sec, + split_group_size_limit, + min_table_split_size, + do_not_config_object_storage_lifecycle, + partition_vnode_count, + 
table_write_throughput_threshold, + min_table_split_write_throughput, + compaction_task_max_heartbeat_interval_secs + ); + Ok(Response::new(ListHummockMetaConfigResponse { configs })) + } + + async fn rise_ctl_rebuild_table_stats( + &self, + _request: Request, + ) -> Result, Status> { + self.hummock_manager.rebuild_table_stats().await?; + Ok(Response::new(RiseCtlRebuildTableStatsResponse {})) + } + + async fn get_compaction_score( + &self, + request: Request, + ) -> Result, Status> { + let compaction_group_id = request.into_inner().compaction_group_id; + let scores = self + .hummock_manager + .get_compaction_scores(compaction_group_id) + .await + .into_iter() + .map(|s| PickerInfo { + score: s.score, + select_level: s.select_level as _, + target_level: s.target_level as _, + picker_type: s.picker_type.to_string(), + }) + .collect(); + Ok(Response::new(GetCompactionScoreResponse { + compaction_group_id, + scores, + })) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + #[test] + fn test_fields_to_kvs() { + struct S { + foo: u64, + bar: String, + } + let s = S { + foo: 15, + bar: "foobar".to_string(), + }; + let kvs: HashMap = fields_to_kvs!(s, foo, bar); + assert_eq!(kvs.len(), 2); + assert_eq!(kvs.get("foo").unwrap(), "15"); + assert_eq!(kvs.get("bar").unwrap(), "foobar"); + } } diff --git a/src/meta/src/rpc/service/mod.rs b/src/meta/service/src/lib.rs similarity index 71% rename from src/meta/src/rpc/service/mod.rs rename to src/meta/service/src/lib.rs index 4484a8ca68a88..6c8cc11f8971c 100644 --- a/src/meta/src/rpc/service/mod.rs +++ b/src/meta/service/src/lib.rs @@ -12,6 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#![feature(lint_reasons)] +#![feature(let_chains)] +#![feature(lazy_cell)] +#![feature(impl_trait_in_assoc_type)] +#![cfg_attr(coverage, feature(coverage_attribute))] + +use risingwave_meta::*; + pub mod backup_service; pub mod cloud_service; pub mod cluster_service; @@ -59,3 +67,25 @@ impl Stream for RwReceiverStream { .map(|opt| opt.map(|res| res.map_err(Into::into))) } } + +use std::net::SocketAddr; + +#[derive(Clone)] +pub struct AddressInfo { + pub advertise_addr: String, + pub listen_addr: SocketAddr, + pub prometheus_addr: Option, + pub dashboard_addr: Option, + pub ui_path: Option, +} +impl Default for AddressInfo { + fn default() -> Self { + Self { + advertise_addr: "".to_string(), + listen_addr: SocketAddr::V4("127.0.0.1:0000".parse().unwrap()), + prometheus_addr: None, + dashboard_addr: None, + ui_path: None, + } + } +} diff --git a/src/meta/src/rpc/service/meta_member_service.rs b/src/meta/service/src/meta_member_service.rs similarity index 96% rename from src/meta/src/rpc/service/meta_member_service.rs rename to src/meta/service/src/meta_member_service.rs index 6fb138b535410..5753061176e8c 100644 --- a/src/meta/src/rpc/service/meta_member_service.rs +++ b/src/meta/service/src/meta_member_service.rs @@ -14,13 +14,13 @@ use either::Either; use risingwave_common::util::addr::HostAddr; +use risingwave_meta::rpc::ElectionClientRef; use risingwave_pb::common::HostAddress; use risingwave_pb::meta::meta_member_service_server::MetaMemberService; use risingwave_pb::meta::{MembersRequest, MembersResponse, MetaMember}; use tonic::{Request, Response, Status}; -use crate::rpc::server::{AddressInfo, ElectionClientRef}; - +use crate::AddressInfo; #[derive(Clone)] pub struct MetaMemberServiceImpl { election_client_or_self: Either, @@ -36,7 +36,7 @@ impl MetaMemberServiceImpl { #[async_trait::async_trait] impl MetaMemberService for MetaMemberServiceImpl { - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn members( &self, _request: 
Request, diff --git a/src/meta/src/rpc/service/notification_service.rs b/src/meta/service/src/notification_service.rs similarity index 98% rename from src/meta/src/rpc/service/notification_service.rs rename to src/meta/service/src/notification_service.rs index 0fcbfe4929ec6..0fcc470a70e39 100644 --- a/src/meta/src/rpc/service/notification_service.rs +++ b/src/meta/service/src/notification_service.rs @@ -120,8 +120,7 @@ impl NotificationServiceImpl { async fn get_tables_and_creating_tables_snapshot(&self) -> (Vec

, NotificationVersion) { let catalog_guard = self.catalog_manager.get_catalog_core_guard().await; - let mut tables = catalog_guard.database.list_tables(); - tables.extend(catalog_guard.database.list_creating_tables()); + let tables = catalog_guard.database.list_tables(); let notification_version = self.env.notification_manager().current_version().await; (tables, notification_version) } @@ -208,7 +207,7 @@ impl NotificationServiceImpl { impl NotificationService for NotificationServiceImpl { type SubscribeStream = UnboundedReceiverStream; - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn subscribe( &self, request: Request, diff --git a/src/meta/src/rpc/service/scale_service.rs b/src/meta/service/src/scale_service.rs similarity index 98% rename from src/meta/src/rpc/service/scale_service.rs rename to src/meta/service/src/scale_service.rs index f231ea5f4955d..676180adc7581 100644 --- a/src/meta/src/rpc/service/scale_service.rs +++ b/src/meta/service/src/scale_service.rs @@ -59,7 +59,7 @@ impl ScaleServiceImpl { #[async_trait::async_trait] impl ScaleService for ScaleServiceImpl { - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn get_cluster_info( &self, _: Request, @@ -110,7 +110,7 @@ impl ScaleService for ScaleServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn reschedule( &self, request: Request, @@ -174,7 +174,7 @@ impl ScaleService for ScaleServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn get_reschedule_plan( &self, request: Request, diff --git a/src/meta/src/rpc/service/serving_service.rs b/src/meta/service/src/serving_service.rs similarity index 100% rename from src/meta/src/rpc/service/serving_service.rs rename to src/meta/service/src/serving_service.rs diff --git a/src/meta/src/rpc/service/sink_coordination_service.rs b/src/meta/service/src/sink_coordination_service.rs similarity index 
92% rename from src/meta/src/rpc/service/sink_coordination_service.rs rename to src/meta/service/src/sink_coordination_service.rs index f7d56af9c063f..72c4cb2ff9af4 100644 --- a/src/meta/src/rpc/service/sink_coordination_service.rs +++ b/src/meta/service/src/sink_coordination_service.rs @@ -20,12 +20,12 @@ use tonic::{Request, Response, Status, Streaming}; use crate::manager::sink_coordination::SinkCoordinatorManager; #[derive(Clone)] -pub(crate) struct SinkCoordinationServiceImpl { +pub struct SinkCoordinationServiceImpl { sink_manager: SinkCoordinatorManager, } impl SinkCoordinationServiceImpl { - pub(crate) fn new(sink_manager: SinkCoordinatorManager) -> Self { + pub fn new(sink_manager: SinkCoordinatorManager) -> Self { Self { sink_manager } } } diff --git a/src/meta/src/rpc/service/stream_service.rs b/src/meta/service/src/stream_service.rs similarity index 96% rename from src/meta/src/rpc/service/stream_service.rs rename to src/meta/service/src/stream_service.rs index b2ed1ec916b08..92af1d4beb707 100644 --- a/src/meta/src/rpc/service/stream_service.rs +++ b/src/meta/service/src/stream_service.rs @@ -59,7 +59,7 @@ impl StreamServiceImpl { #[async_trait::async_trait] impl StreamManagerService for StreamServiceImpl { - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn flush(&self, request: Request) -> TonicResponse { self.env.idle_manager().record_activity(); let req = request.into_inner(); @@ -71,7 +71,7 @@ impl StreamManagerService for StreamServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn pause(&self, _: Request) -> Result, Status> { let i = self .barrier_scheduler @@ -83,7 +83,7 @@ impl StreamManagerService for StreamServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn resume(&self, _: Request) -> Result, Status> { let i = self .barrier_scheduler @@ -122,7 +122,7 @@ impl StreamManagerService for StreamServiceImpl { })) } 
- #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn list_table_fragments( &self, request: Request, @@ -165,7 +165,7 @@ impl StreamManagerService for StreamServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn list_table_fragment_states( &self, _request: Request, @@ -186,7 +186,7 @@ impl StreamManagerService for StreamServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn list_fragment_distribution( &self, _request: Request, @@ -207,6 +207,7 @@ impl StreamManagerService for StreamServiceImpl { state_table_ids: fragment.state_table_ids.clone(), upstream_fragment_ids: fragment.upstream_fragment_ids.clone(), fragment_type_mask: fragment.fragment_type_mask, + parallelism: fragment.actors.len() as _, } }) }) @@ -214,7 +215,7 @@ impl StreamManagerService for StreamServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn list_actor_states( &self, _request: Request, diff --git a/src/meta/src/rpc/service/system_params_service.rs b/src/meta/service/src/system_params_service.rs similarity index 64% rename from src/meta/src/rpc/service/system_params_service.rs rename to src/meta/service/src/system_params_service.rs index 114c9aa917a68..8d557b401a2ed 100644 --- a/src/meta/src/rpc/service/system_params_service.rs +++ b/src/meta/service/src/system_params_service.rs @@ -19,16 +19,22 @@ use risingwave_pb::meta::{ }; use tonic::{Request, Response, Status}; +use crate::controller::system_param::SystemParamsControllerRef; use crate::manager::SystemParamsManagerRef; pub struct SystemParamsServiceImpl { system_params_manager: SystemParamsManagerRef, + system_params_controller: Option, } impl SystemParamsServiceImpl { - pub fn new(system_params_manager: SystemParamsManagerRef) -> Self { + pub fn new( + system_params_manager: SystemParamsManagerRef, + system_params_controller: Option, + ) -> Self { Self { 
system_params_manager, + system_params_controller, } } } @@ -39,8 +45,15 @@ impl SystemParamsService for SystemParamsServiceImpl { &self, _request: Request, ) -> Result, Status> { - let params = Some(self.system_params_manager.get_pb_params().await); - Ok(Response::new(GetSystemParamsResponse { params })) + let params = if let Some(ctl) = &self.system_params_controller { + ctl.get_pb_params().await + } else { + self.system_params_manager.get_pb_params().await + }; + + Ok(Response::new(GetSystemParamsResponse { + params: Some(params), + })) } async fn set_system_param( @@ -48,10 +61,14 @@ impl SystemParamsService for SystemParamsServiceImpl { request: Request, ) -> Result, Status> { let req = request.into_inner(); - let params = self - .system_params_manager - .set_param(&req.param, req.value) - .await?; + let params = if let Some(ctl) = &self.system_params_controller { + ctl.set_param(&req.param, req.value).await? + } else { + self.system_params_manager + .set_param(&req.param, req.value) + .await? 
+ }; + Ok(Response::new(SetSystemParamResponse { params: Some(params), })) diff --git a/src/meta/src/rpc/service/telemetry_service.rs b/src/meta/service/src/telemetry_service.rs similarity index 67% rename from src/meta/src/rpc/service/telemetry_service.rs rename to src/meta/service/src/telemetry_service.rs index b1a9cdec3ef34..7c413406f13e5 100644 --- a/src/meta/src/rpc/service/telemetry_service.rs +++ b/src/meta/service/src/telemetry_service.rs @@ -14,25 +14,38 @@ use risingwave_pb::meta::telemetry_info_service_server::TelemetryInfoService; use risingwave_pb::meta::{GetTelemetryInfoRequest, TelemetryInfoResponse}; +use sea_orm::EntityTrait; use tonic::{Request, Response, Status}; +use crate::controller::SqlMetaStore; use crate::model::ClusterId; +use crate::model_v2::prelude::Cluster; use crate::storage::MetaStoreRef; +use crate::MetaResult; pub struct TelemetryInfoServiceImpl { meta_store: MetaStoreRef, + sql_meta_store: Option, } impl TelemetryInfoServiceImpl { - pub fn new(meta_store: MetaStoreRef) -> Self { - Self { meta_store } + pub fn new(meta_store: MetaStoreRef, sql_meta_store: Option) -> Self { + Self { + meta_store, + sql_meta_store, + } } - async fn get_tracking_id(&self) -> Option { - ClusterId::from_meta_store(&self.meta_store) + async fn get_tracking_id(&self) -> MetaResult> { + if let Some(store) = &self.sql_meta_store { + let cluster = Cluster::find().one(&store.conn).await?; + return Ok(cluster.map(|c| c.cluster_id.to_string().into())); + } + + Ok(ClusterId::from_meta_store(&self.meta_store) .await .ok() - .flatten() + .flatten()) } } @@ -42,7 +55,7 @@ impl TelemetryInfoService for TelemetryInfoServiceImpl { &self, _request: Request, ) -> Result, Status> { - match self.get_tracking_id().await { + match self.get_tracking_id().await? 
{ Some(tracking_id) => Ok(Response::new(TelemetryInfoResponse { tracking_id: Some(tracking_id.into()), })), diff --git a/src/meta/src/rpc/service/user_service.rs b/src/meta/service/src/user_service.rs similarity index 96% rename from src/meta/src/rpc/service/user_service.rs rename to src/meta/service/src/user_service.rs index e1b7cc27092d5..cb290766e6fd1 100644 --- a/src/meta/src/rpc/service/user_service.rs +++ b/src/meta/service/src/user_service.rs @@ -107,7 +107,7 @@ impl UserServiceImpl { #[async_trait::async_trait] impl UserService for UserServiceImpl { - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn create_user( &self, request: Request, @@ -128,7 +128,7 @@ impl UserService for UserServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn drop_user( &self, request: Request, @@ -142,7 +142,7 @@ impl UserService for UserServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn update_user( &self, request: Request, @@ -151,7 +151,7 @@ impl UserService for UserServiceImpl { let update_fields = req .update_fields .iter() - .map(|i| UpdateField::from_i32(*i).unwrap()) + .map(|i| UpdateField::try_from(*i).unwrap()) .collect_vec(); let user = req.get_user()?.clone(); let version = self @@ -165,7 +165,7 @@ impl UserService for UserServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn grant_privilege( &self, request: Request, @@ -185,7 +185,7 @@ impl UserService for UserServiceImpl { })) } - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] async fn revoke_privilege( &self, request: Request, diff --git a/src/meta/src/backup_restore/backup_manager.rs b/src/meta/src/backup_restore/backup_manager.rs index 1eea48307cb33..819ea02e36346 100644 --- a/src/meta/src/backup_restore/backup_manager.rs +++ b/src/meta/src/backup_restore/backup_manager.rs @@ -12,11 +12,11 @@ // See 
the License for the specific language governing permissions and // limitations under the License. +use std::collections::HashSet; use std::sync::Arc; use std::time::Instant; use arc_swap::ArcSwap; -use itertools::Itertools; use risingwave_backup::error::BackupError; use risingwave_backup::storage::{BoxedMetaSnapshotStorage, ObjectStoreMetaSnapshotStorage}; use risingwave_backup::{MetaBackupJobId, MetaSnapshotId, MetaSnapshotManifest}; @@ -68,9 +68,11 @@ pub struct BackupManager { hummock_manager: HummockManagerRef, backup_store: ArcSwap<(BoxedMetaSnapshotStorage, StoreConfig)>, /// Tracks the running backup job. Concurrent jobs is not supported. - running_backup_job: tokio::sync::Mutex>, + running_job_handle: tokio::sync::Mutex>, metrics: BackupManagerMetrics, meta_metrics: Arc, + /// (job id, status, message) + latest_job_info: ArcSwap<(MetaBackupJobId, BackupJobStatus, String)>, } impl BackupManager { @@ -147,9 +149,10 @@ impl BackupManager { env, hummock_manager, backup_store: ArcSwap::from_pointee(backup_store), - running_backup_job: tokio::sync::Mutex::new(None), + running_job_handle: tokio::sync::Mutex::new(None), metrics: BackupManagerMetrics::default(), meta_metrics, + latest_job_info: ArcSwap::from_pointee((0, BackupJobStatus::NotFound, "".into())), } } @@ -181,7 +184,7 @@ impl BackupManager { /// Starts a backup job in background. It's non-blocking. /// Returns job id. 
pub async fn start_backup_job(self: &Arc) -> MetaResult { - let mut guard = self.running_backup_job.lock().await; + let mut guard = self.running_job_handle.lock().await; if let Some(job) = (*guard).as_ref() { bail!(format!( "concurrent backup job is not supported: existent job {}", @@ -213,6 +216,8 @@ impl BackupManager { .id_gen_manager() .generate::<{ IdCategory::Backup }>() .await?; + self.latest_job_info + .store(Arc::new((job_id, BackupJobStatus::Running, "".into()))); let hummock_version_safe_point = self.hummock_manager.register_safe_point().await; // Ideally `BackupWorker` and its r/w IO can be made external to meta node. // The justification of keeping `BackupWorker` in meta node are: @@ -227,27 +232,12 @@ impl BackupManager { Ok(job_id) } - pub async fn get_backup_job_status( - &self, - job_id: MetaBackupJobId, - ) -> MetaResult { - if let Some(running_job) = self.running_backup_job.lock().await.as_ref() { - if running_job.job_id == job_id { - return Ok(BackupJobStatus::Running); - } - } - if self - .backup_store - .load() - .0 - .manifest() - .snapshot_metadata - .iter() - .any(|m| m.id == job_id) - { - return Ok(BackupJobStatus::Succeeded); + pub fn get_backup_job_status(&self, job_id: MetaBackupJobId) -> (BackupJobStatus, String) { + let last = self.latest_job_info.load(); + if last.0 == job_id { + return (last.1, last.2.clone()); } - Ok(BackupJobStatus::NotFound) + (BackupJobStatus::NotFound, "".into()) } async fn finish_backup_job(&self, job_id: MetaBackupJobId, job_result: BackupJobResult) { @@ -269,16 +259,24 @@ impl BackupManager { id: self.backup_store.load().0.manifest().manifest_id, }), ); + self.latest_job_info.store(Arc::new(( + job_id, + BackupJobStatus::Succeeded, + "".into(), + ))); } BackupJobResult::Failed(e) => { self.metrics.job_latency_failure.observe(job_latency); - tracing::warn!("failed backup job {}: {}", job_id, e); + let message = format!("failed backup job {}: {}", job_id, e); + tracing::warn!(message); + self.latest_job_info + 
.store(Arc::new((job_id, BackupJobStatus::Failed, message))); } } } async fn take_job_handle_by_job_id(&self, job_id: u64) -> Option { - let mut guard = self.running_backup_job.lock().await; + let mut guard = self.running_job_handle.lock().await; match (*guard).as_ref() { None => { return None; @@ -307,7 +305,7 @@ impl BackupManager { } /// List all `SSTables` required by backups. - pub fn list_pinned_ssts(&self) -> Vec { + pub fn list_pinned_ssts(&self) -> HashSet { self.backup_store .load() .0 @@ -315,8 +313,7 @@ impl BackupManager { .snapshot_metadata .iter() .flat_map(|s| s.ssts.clone()) - .dedup() - .collect_vec() + .collect() } pub fn manifest(&self) -> Arc { diff --git a/src/meta/src/backup_restore/meta_snapshot_builder.rs b/src/meta/src/backup_restore/meta_snapshot_builder.rs index 0df9966607b93..ef98c1158fd2a 100644 --- a/src/meta/src/backup_restore/meta_snapshot_builder.rs +++ b/src/meta/src/backup_restore/meta_snapshot_builder.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::future::Future; use anyhow::anyhow; @@ -65,18 +65,22 @@ impl MetaSnapshotBuilder { // hummock_version and version_stats is guaranteed to exist in a initialized cluster. let hummock_version = { let mut redo_state = hummock_version; - let hummock_version_deltas = - HummockVersionDelta::list_at_snapshot::(&meta_store_snapshot).await?; - for version_delta in &hummock_version_deltas { + let hummock_version_deltas: BTreeMap<_, _> = + HummockVersionDelta::list_at_snapshot::(&meta_store_snapshot) + .await? 
+ .into_iter() + .map(|d| (d.id, d)) + .collect(); + for version_delta in hummock_version_deltas.values() { if version_delta.prev_id == redo_state.id { redo_state.apply_version_delta(version_delta); } } - if let Some(log) = hummock_version_deltas.iter().next_back() { - if log.id != redo_state.id { + if let Some((max_log_id, _)) = hummock_version_deltas.last_key_value() { + if *max_log_id != redo_state.id { return Err(BackupError::Other(anyhow::anyhow!(format!( "inconsistent hummock version: expected {}, actual {}", - log.id, redo_state.id + max_log_id, redo_state.id )))); } } @@ -187,7 +191,6 @@ mod tests { let v_ = v.clone(); async move { v_ } }; - hummock_version.insert(&meta_store).await.unwrap(); let err = builder .build(1, get_ckpt_builder(&hummock_version)) .await diff --git a/src/meta/src/backup_restore/restore.rs b/src/meta/src/backup_restore/restore.rs index 55de43d37d4eb..47d14330f5fb2 100644 --- a/src/meta/src/backup_restore/restore.rs +++ b/src/meta/src/backup_restore/restore.rs @@ -14,7 +14,6 @@ use std::sync::Arc; -use clap::Parser; use itertools::Itertools; use risingwave_backup::error::{BackupError, BackupResult}; use risingwave_backup::meta_snapshot::MetaSnapshot; @@ -33,7 +32,7 @@ use crate::model::{ClusterId, MetadataModel, TableFragments}; use crate::storage::{MetaStore, DEFAULT_COLUMN_FAMILY}; /// Command-line arguments for restore. -#[derive(Parser, Debug, Clone)] +#[derive(clap::Args, Debug, Clone)] pub struct RestoreOpts { /// Id of snapshot used to restore. Available snapshots can be found in /// /manifest.json. @@ -65,7 +64,7 @@ pub struct RestoreOpts { pub hummock_storage_url: String, /// Directory of storage to restore hummock version to. #[clap(long, default_value_t = String::from("hummock_001"))] - pub hummock_storage_dir: String, + pub hummock_storage_directory: String, /// Print the target snapshot, but won't restore to meta store. 
#[clap(long)] pub dry_run: bool, @@ -73,7 +72,7 @@ pub struct RestoreOpts { async fn restore_hummock_version( hummock_storage_url: &str, - hummock_storage_dir: &str, + hummock_storage_directory: &str, hummock_version: &HummockVersion, ) -> BackupResult<()> { let object_store = Arc::new( @@ -84,7 +83,7 @@ async fn restore_hummock_version( ) .await, ); - let checkpoint_path = version_checkpoint_path(hummock_storage_dir); + let checkpoint_path = version_checkpoint_path(hummock_storage_directory); let checkpoint = HummockVersionCheckpoint { version: Some(hummock_version.clone()), // Ignore stale objects. Full GC will clear them. @@ -153,7 +152,6 @@ async fn restore_default_cf( async fn restore_metadata(meta_store: S, snapshot: MetaSnapshot) -> BackupResult<()> { restore_default_cf(&meta_store, &snapshot).await?; - restore_metadata_model(&meta_store, &[snapshot.metadata.hummock_version]).await?; restore_metadata_model(&meta_store, &[snapshot.metadata.version_stats]).await?; restore_metadata_model( &meta_store, @@ -260,7 +258,7 @@ async fn restore_impl( } restore_hummock_version( &opts.hummock_storage_url, - &opts.hummock_storage_dir, + &opts.hummock_storage_directory, &target_snapshot.metadata.hummock_version, ) .await?; @@ -288,11 +286,10 @@ pub async fn restore(opts: RestoreOpts) -> BackupResult<()> { mod tests { use std::collections::HashMap; - use clap::Parser; use itertools::Itertools; use risingwave_backup::meta_snapshot::{ClusterMetadata, MetaSnapshot}; - use risingwave_common::config::SystemConfig; - use risingwave_pb::hummock::HummockVersion; + use risingwave_common::config::{MetaBackend, SystemConfig}; + use risingwave_pb::hummock::{HummockVersion, HummockVersionStats}; use risingwave_pb::meta::SystemParams; use crate::backup_restore::restore::restore_impl; @@ -304,17 +301,19 @@ mod tests { use crate::storage::{MetaStore, DEFAULT_COLUMN_FAMILY}; fn get_restore_opts() -> RestoreOpts { - RestoreOpts::parse_from([ - "restore", - "--meta-snapshot-id", - "1", - 
"--meta-store-type", - "mem", - "--backup-storage-url", - "memory", - "--hummock-storage-url", - "memory", - ]) + RestoreOpts { + meta_snapshot_id: 1, + meta_store_type: MetaBackend::Mem, + etcd_endpoints: "".to_string(), + etcd_auth: false, + etcd_username: "".to_string(), + etcd_password: "".to_string(), + backup_storage_url: "memory".to_string(), + backup_storage_directory: "".to_string(), + hummock_storage_url: "memory".to_string(), + hummock_storage_directory: "".to_string(), + dry_run: false, + } } fn get_system_params() -> SystemParams { @@ -332,8 +331,8 @@ mod tests { let backup_store = get_backup_store(opts.clone()).await.unwrap(); let nonempty_meta_store = get_meta_store(opts.clone()).await.unwrap(); dispatch_meta_store!(nonempty_meta_store.clone(), store, { - let hummock_version = HummockVersion::default(); - hummock_version.insert(&store).await.unwrap(); + let stats = HummockVersionStats::default(); + stats.insert(&store).await.unwrap(); }); let empty_meta_store = get_meta_store(opts.clone()).await.unwrap(); let system_param = get_system_params(); @@ -378,13 +377,6 @@ mod tests { .unwrap(); dispatch_meta_store!(empty_meta_store, store, { - let restored_hummock_version = HummockVersion::list(&store) - .await - .unwrap() - .into_iter() - .next() - .unwrap(); - assert_eq!(restored_hummock_version.id, 123); let restored_system_param = SystemParams::get(&store).await.unwrap().unwrap(); assert_eq!(restored_system_param, system_param); }); @@ -548,7 +540,6 @@ mod tests { .unwrap(); dispatch_meta_store!(empty_meta_store, store, { - assert!(HummockVersion::list(&store).await.unwrap().is_empty()); assert!(SystemParams::get(&store).await.unwrap().is_none()); }); } diff --git a/src/meta/src/barrier/command.rs b/src/meta/src/barrier/command.rs index d0deac65b3207..bbe60c010b94b 100644 --- a/src/meta/src/barrier/command.rs +++ b/src/meta/src/barrier/command.rs @@ -36,7 +36,8 @@ use uuid::Uuid; use super::info::BarrierActorInfo; use super::trace::TracedEpoch; use 
crate::barrier::CommandChanges; -use crate::manager::{FragmentManagerRef, WorkerId}; +use crate::hummock::HummockManagerRef; +use crate::manager::{CatalogManagerRef, FragmentManagerRef, WorkerId}; use crate::model::{ActorId, DispatcherId, FragmentId, TableFragments}; use crate::stream::{build_actor_connector_splits, SourceManagerRef, SplitAssignment}; use crate::MetaResult; @@ -139,6 +140,7 @@ pub enum Command { new_table_fragments: TableFragments, merge_updates: Vec, dispatchers: HashMap>, + init_split_assignment: SplitAssignment, }, /// `SourceSplitAssignment` generates Plain(Mutation::Splits) for pushing initialized splits or @@ -215,7 +217,9 @@ impl Command { /// [`CommandContext`] is used for generating barrier and doing post stuffs according to the given /// [`Command`]. pub struct CommandContext { - fragment_manager: FragmentManagerRef, + pub fragment_manager: FragmentManagerRef, + catalog_manager: CatalogManagerRef, + hummock_manager: HummockManagerRef, client_pool: StreamClientPoolRef, @@ -246,6 +250,8 @@ impl CommandContext { #[allow(clippy::too_many_arguments)] pub(super) fn new( fragment_manager: FragmentManagerRef, + catalog_manager: CatalogManagerRef, + hummock_manager: HummockManagerRef, client_pool: StreamClientPoolRef, info: BarrierActorInfo, prev_epoch: TracedEpoch, @@ -258,6 +264,8 @@ impl CommandContext { ) -> Self { Self { fragment_manager, + catalog_manager, + hummock_manager, client_pool, info: Arc::new(info), prev_epoch, @@ -352,6 +360,7 @@ impl CommandContext { old_table_fragments, merge_updates, dispatchers, + init_split_assignment, .. 
} => { let dropped_actors = old_table_fragments.actor_ids(); @@ -368,10 +377,16 @@ impl CommandContext { }) .collect(); + let actor_splits = init_split_assignment + .values() + .flat_map(build_actor_connector_splits) + .collect(); + Some(Mutation::Update(UpdateMutation { actor_new_dispatchers, merge_update: merge_updates.clone(), dropped_actors, + actor_splits, ..Default::default() })) } @@ -655,7 +670,51 @@ impl CommandContext { Command::CancelStreamingJob(table_fragments) => { let node_actors = table_fragments.worker_actor_ids(); self.clean_up(node_actors).await?; - // Drop fragment info in meta store. + + // NOTE(kwannoel): At this point, meta has already registered the table ids. + // We should unregister them. + // This is required for background ddl, for foreground ddl this is a no-op. + // Foreground ddl is handled entirely by stream manager, so it will unregister + // the table ids on failure. + // On the other hand background ddl could be handled by barrier manager. + // It won't clean the tables on failure, + // since the failure could be recoverable. + // As such it needs to be handled here. + let table_id = table_fragments.table_id().table_id; + let mut table_ids = table_fragments.internal_table_ids(); + table_ids.push(table_id); + if let Err(e) = self.hummock_manager.unregister_table_ids(&table_ids).await { + tracing::warn!("Failed to unregister compaction group for {:#?}. They will be cleaned up on node restart. {:#?}", &table_ids, e); + } + + // NOTE(kwannoel): At this point, catalog manager has persisted the tables already. + // We need to cleanup the table state. So we can do it here. + // The logic is the same as above, for hummock_manager.unregister_table_ids. 
+ if let Err(e) = self + .catalog_manager + .cancel_create_table_procedure( + table_fragments.table_id().table_id, + table_fragments.internal_table_ids(), + ) + .await + { + let table_id = table_fragments.table_id().table_id; + tracing::warn!( + table_id, + reason=?e, + "cancel_create_table_procedure failed for CancelStreamingJob", + ); + // If failed, check that table is not in meta store. + // If any table is, just panic, let meta do bootstrap recovery. + // Otherwise our persisted state is dirty. + let mut table_ids = table_fragments.internal_table_ids(); + table_ids.push(table_id); + self.catalog_manager.assert_tables_deleted(table_ids).await; + } + + // We need to drop table fragments here, + // since this is not done in stream manager (foreground ddl) + // OR barrier manager (background ddl) self.fragment_manager .drop_table_fragments_vec(&HashSet::from_iter(std::iter::once( table_fragments.table_id(), @@ -761,6 +820,7 @@ impl CommandContext { new_table_fragments, merge_updates, dispatchers, + .. } => { let table_ids = HashSet::from_iter(std::iter::once(old_table_fragments.table_id())); @@ -782,24 +842,4 @@ impl CommandContext { Ok(()) } - - /// Do some stuffs before the barrier is `finish`ed. Only used for `CreateStreamingJob`. - pub async fn pre_finish(&self) -> MetaResult<()> { - #[allow(clippy::single_match)] - match &self.command { - Command::CreateStreamingJob { - table_fragments, .. - } => { - // Update the state of the table fragments from `Creating` to `Created`, so that the - // fragments can be scaled. 
- self.fragment_manager - .mark_table_fragments_created(table_fragments.table_id()) - .await?; - } - - _ => {} - } - - Ok(()) - } } diff --git a/src/meta/src/barrier/mod.rs b/src/meta/src/barrier/mod.rs index cd3ee0360009f..d39dde51399d8 100644 --- a/src/meta/src/barrier/mod.rs +++ b/src/meta/src/barrier/mod.rs @@ -50,7 +50,7 @@ use self::info::BarrierActorInfo; use self::notifier::Notifier; use self::progress::TrackingCommand; use crate::barrier::notifier::BarrierInfo; -use crate::barrier::progress::CreateMviewProgressTracker; +use crate::barrier::progress::{CreateMviewProgressTracker, TrackingJob}; use crate::barrier::BarrierEpochState::{Completed, InFlight}; use crate::hummock::HummockManagerRef; use crate::manager::sink_coordination::SinkCoordinatorManager; @@ -58,7 +58,7 @@ use crate::manager::{ CatalogManagerRef, ClusterManagerRef, FragmentManagerRef, LocalNotification, MetaSrvEnv, WorkerId, }; -use crate::model::{ActorId, BarrierManagerState}; +use crate::model::{ActorId, BarrierManagerState, TableFragments}; use crate::rpc::metrics::MetaMetrics; use crate::stream::SourceManagerRef; use crate::{MetaError, MetaResult}; @@ -75,6 +75,35 @@ pub use self::command::{Command, Reschedule}; pub use self::schedule::BarrierScheduler; pub use self::trace::TracedEpoch; +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub(crate) struct TableMap { + inner: HashMap, +} + +impl TableMap { + pub fn remove(&mut self, table_id: &TableId) -> Option { + self.inner.remove(table_id) + } +} + +impl From> for TableMap { + fn from(inner: HashMap) -> Self { + Self { inner } + } +} + +impl From> for HashMap { + fn from(table_map: TableMap) -> Self { + table_map.inner + } +} + +pub(crate) type TableActorMap = TableMap>; +pub(crate) type TableUpstreamMvCountMap = TableMap>; +pub(crate) type TableDefinitionMap = TableMap; +pub(crate) type TableNotifierMap = TableMap; +pub(crate) type TableFragmentMap = TableMap; + /// Status of barrier manager. 
enum BarrierManagerStatus { /// Barrier manager is starting. @@ -139,7 +168,7 @@ pub struct GlobalBarrierManager { cluster_manager: ClusterManagerRef, - pub(crate) catalog_manager: CatalogManagerRef, + pub catalog_manager: CatalogManagerRef, fragment_manager: FragmentManagerRef, @@ -151,7 +180,7 @@ pub struct GlobalBarrierManager { metrics: Arc, - pub(crate) env: MetaSrvEnv, + pub env: MetaSrvEnv, tracker: Mutex, } @@ -177,7 +206,7 @@ struct CheckpointControl { metrics: Arc, /// Get notified when we finished Create MV and collect a barrier(checkpoint = true) - finished_commands: Vec, + finished_commands: Vec, } impl CheckpointControl { @@ -194,8 +223,8 @@ impl CheckpointControl { } /// Stash a command to finish later. - fn stash_command_to_finish(&mut self, finished_command: TrackingCommand) { - self.finished_commands.push(finished_command); + fn stash_command_to_finish(&mut self, finished_job: TrackingJob) { + self.finished_commands.push(finished_job); } /// Finish stashed commands. If the current barrier is not a `checkpoint`, we will not finish @@ -205,31 +234,32 @@ impl CheckpointControl { async fn finish_commands(&mut self, checkpoint: bool) -> MetaResult { for command in self .finished_commands - .extract_if(|c| checkpoint || c.context.kind.is_barrier()) + .extract_if(|c| checkpoint || c.is_barrier()) { // The command is ready to finish. We can now call `pre_finish`. 
- command.context.pre_finish().await?; - command - .notifiers - .into_iter() - .for_each(Notifier::notify_finished); + command.pre_finish().await?; + command.notify_finished(); } Ok(!self.finished_commands.is_empty()) } - fn cancel_command(&mut self, cancelled_command: TrackingCommand) { - if let Some(index) = self.command_ctx_queue.iter().position(|x| { - x.command_ctx.prev_epoch.value() == cancelled_command.context.prev_epoch.value() - }) { - self.command_ctx_queue.remove(index); - self.remove_changes(cancelled_command.context.command.changes()); + fn cancel_command(&mut self, cancelled_job: TrackingJob) { + if let TrackingJob::New(cancelled_command) = cancelled_job { + if let Some(index) = self.command_ctx_queue.iter().position(|x| { + x.command_ctx.prev_epoch.value() == cancelled_command.context.prev_epoch.value() + }) { + self.command_ctx_queue.remove(index); + self.remove_changes(cancelled_command.context.command.changes()); + } + } else { + // Recovered jobs do not need to be cancelled since only `RUNNING` actors will get recovered. 
} } fn cancel_stashed_command(&mut self, id: TableId) { self.finished_commands - .retain(|x| x.context.table_to_create() != Some(id)); + .retain(|x| x.table_to_create() != Some(id)); } /// Before resolving the actors to be sent or collected, we should first record the newly @@ -688,13 +718,13 @@ impl GlobalBarrierManager { }; // Tracing related stuff - prev_epoch.span().in_scope(|| { - tracing::info!(target: "rw_tracing", epoch = curr_epoch.value().0, "new barrier enqueued"); - }); + tracing::info!(target: "rw_tracing", parent: prev_epoch.span(), epoch = curr_epoch.value().0, "new barrier enqueued"); span.record("epoch", curr_epoch.value().0); let command_ctx = Arc::new(CommandContext::new( self.fragment_manager.clone(), + self.catalog_manager.clone(), + self.hummock_manager.clone(), self.env.stream_client_pool_ref(), info, prev_epoch.clone(), @@ -798,9 +828,9 @@ impl GlobalBarrierManager { actor_ids_to_send, actor_ids_to_collect, }; - tracing::trace!( + tracing::debug!( target: "events::meta::barrier::inject_barrier", - "inject barrier request: {:?}", request + ?request, "inject barrier request" ); // This RPC returns only if this worker node has injected this barrier. @@ -840,9 +870,9 @@ impl GlobalBarrierManager { prev_epoch, tracing_context, }; - tracing::trace!( + tracing::debug!( target: "events::meta::barrier::barrier_complete", - "barrier complete request: {:?}", request + ?request, "barrier complete" ); // This RPC returns only if this worker node has collected this barrier. @@ -912,6 +942,7 @@ impl GlobalBarrierManager { let fail_nodes = complete_nodes .drain(index..) .chain(checkpoint_control.barrier_failed().into_iter()); + tracing::warn!("Failed to commit epoch {}: {:?}", prev_epoch, err); self.failure_recovery(err, fail_nodes, state, checkpoint_control) .await; } @@ -939,11 +970,7 @@ impl GlobalBarrierManager { } if self.enable_recovery { - // If failed, enter recovery mode. 
self.set_status(BarrierManagerStatus::Recovering).await; - let mut tracker = self.tracker.lock().await; - *tracker = CreateMviewProgressTracker::new(); - let latest_snapshot = self.hummock_manager.latest_snapshot(); let prev_epoch = TracedEpoch::new(latest_snapshot.committed_epoch.into()); // we can only recovery from the committed epoch let span = tracing::info_span!( @@ -952,6 +979,8 @@ impl GlobalBarrierManager { prev_epoch = prev_epoch.value().0 ); + // No need to clean dirty tables for barrier recovery, + // The foreground stream job should cleanup their own tables. *state = self.recovery(prev_epoch, None).instrument(span).await; self.set_status(BarrierManagerStatus::Running).await; } else { diff --git a/src/meta/src/barrier/notifier.rs b/src/meta/src/barrier/notifier.rs index 88acd9cd3dd7a..b28c5b01d53d9 100644 --- a/src/meta/src/barrier/notifier.rs +++ b/src/meta/src/barrier/notifier.rs @@ -30,7 +30,7 @@ pub struct BarrierInfo { /// Used for notifying the status of a scheduled command/barrier. #[derive(Debug, Default)] -pub(super) struct Notifier { +pub(crate) struct Notifier { /// Get notified when scheduled barrier is injected to compute nodes. 
pub injected: Option>, diff --git a/src/meta/src/barrier/progress.rs b/src/meta/src/barrier/progress.rs index d484e471f4a31..22cd6f8d9e200 100644 --- a/src/meta/src/barrier/progress.rs +++ b/src/meta/src/barrier/progress.rs @@ -25,17 +25,21 @@ use risingwave_pb::stream_service::barrier_complete_response::CreateMviewProgres use super::command::CommandContext; use super::notifier::Notifier; -use crate::barrier::Command; -use crate::model::ActorId; +use crate::barrier::{ + Command, TableActorMap, TableDefinitionMap, TableFragmentMap, TableNotifierMap, + TableUpstreamMvCountMap, +}; +use crate::manager::{FragmentManager, FragmentManagerRef}; +use crate::model::{ActorId, TableFragments}; +use crate::MetaResult; -type CreateMviewEpoch = Epoch; type ConsumedRows = u64; #[derive(Clone, Copy, Debug)] -enum ChainState { +pub enum ChainState { Init, ConsumingUpstream(Epoch, ConsumedRows), - Done, + Done(ConsumedRows), } /// Progress of all actors containing chain nodes while creating mview. @@ -45,10 +49,9 @@ struct Progress { done_count: usize, - /// Creating mv id. - creating_mv_id: TableId, - - /// Upstream mv count. Keep track of how many times each upstream MV appears. + /// Upstream mv count. + /// Keep track of how many times each upstream MV + /// appears in this stream job. upstream_mv_count: HashMap, /// Upstream mvs total key count. @@ -65,7 +68,6 @@ impl Progress { /// Create a [`Progress`] for some creating mview, with all `actors` containing the chain nodes. 
fn new( actors: impl IntoIterator, - creating_mv_id: TableId, upstream_mv_count: HashMap, upstream_total_key_count: u64, definition: String, @@ -79,7 +81,6 @@ impl Progress { Self { states, done_count: 0, - creating_mv_id, upstream_mv_count, upstream_total_key_count, consumed_rows: 0, @@ -93,18 +94,17 @@ impl Progress { match self.states.remove(&actor).unwrap() { ChainState::Init => {} ChainState::ConsumingUpstream(_, old_consumed_rows) => { - if !matches!(new_state, ChainState::Done) { - self.consumed_rows -= old_consumed_rows; - } + self.consumed_rows -= old_consumed_rows; } - ChainState::Done => panic!("should not report done multiple times"), + ChainState::Done(_) => panic!("should not report done multiple times"), }; match &new_state { ChainState::Init => {} ChainState::ConsumingUpstream(_, new_consumed_rows) => { self.consumed_rows += new_consumed_rows; } - ChainState::Done => { + ChainState::Done(new_consumed_rows) => { + self.consumed_rows += new_consumed_rows; self.done_count += 1; } }; @@ -140,6 +140,80 @@ impl Progress { } } +/// There are 2 kinds of `TrackingJobs`: +/// 1. `New`. This refers to the "New" type of tracking job. +/// It is instantiated and managed by the stream manager. +/// On recovery, the stream manager will stop managing the job. +/// 2. `Recovered`. This refers to the "Recovered" type of tracking job. +/// On recovery, the barrier manager will recover and start managing the job. 
+pub enum TrackingJob { + New(TrackingCommand), + Recovered(RecoveredTrackingJob), +} + +impl TrackingJob { + fn fragment_manager(&self) -> &FragmentManager { + match self { + TrackingJob::New(command) => command.context.fragment_manager.as_ref(), + TrackingJob::Recovered(recovered) => recovered.fragment_manager.as_ref(), + } + } + + pub(crate) fn is_barrier(&self) -> bool { + match self { + TrackingJob::Recovered(_) => true, + TrackingJob::New(command) => command.context.kind.is_barrier(), + } + } + + pub(crate) async fn pre_finish(&self) -> MetaResult<()> { + let table_fragments = match &self { + TrackingJob::New(command) => match &command.context.command { + Command::CreateStreamingJob { + table_fragments, .. + } => Some(table_fragments), + _ => None, + }, + TrackingJob::Recovered(recovered) => Some(&recovered.fragments), + }; + // Update the state of the table fragments from `Creating` to `Created`, so that the + // fragments can be scaled. + if let Some(table_fragments) = table_fragments { + self.fragment_manager() + .mark_table_fragments_created(table_fragments.table_id()) + .await?; + } + Ok(()) + } + + pub(crate) fn notify_finished(self) { + match self { + TrackingJob::New(command) => { + command + .notifiers + .into_iter() + .for_each(Notifier::notify_finished); + } + TrackingJob::Recovered(recovered) => { + recovered.finished.notify_finished(); + } + } + } + + pub(crate) fn table_to_create(&self) -> Option { + match self { + TrackingJob::New(command) => command.context.table_to_create(), + TrackingJob::Recovered(recovered) => Some(recovered.fragments.table_id()), + } + } +} + +pub struct RecoveredTrackingJob { + pub fragments: TableFragments, + pub finished: Notifier, + pub fragment_manager: FragmentManagerRef, +} + /// The command tracking by the [`CreateMviewProgressTracker`]. pub(super) struct TrackingCommand { /// The context of the command. @@ -151,15 +225,80 @@ pub(super) struct TrackingCommand { /// Track the progress of all creating mviews. 
When creation is done, `notify_finished` will be /// called on registered notifiers. +/// +/// Tracking is done as follows: +/// 1. We identify a `StreamJob` by its `TableId` of its `Materialized` table. +/// 2. For each stream job, there are several actors which run its tasks. +/// 3. With `progress_map` we can use the ID of the `StreamJob` to view its progress. +/// 4. With `actor_map` we can use an actor's `ActorId` to find the ID of the `StreamJob`. pub(super) struct CreateMviewProgressTracker { - /// Progress of the create-mview DDL indicated by the epoch. - progress_map: HashMap, + /// Progress of the create-mview DDL indicated by the TableId. + progress_map: HashMap, /// Find the epoch of the create-mview DDL by the actor containing the chain node. - actor_map: HashMap, + actor_map: HashMap, } impl CreateMviewProgressTracker { + /// This step recovers state from the meta side: + /// 1. `Tables`. + /// 2. `TableFragments`. + /// + /// Other state are persisted by the `BackfillExecutor`, such as: + /// 1. `CreateMviewProgress`. + /// 2. `Backfill` position. + pub fn recover( + table_map: TableActorMap, + mut upstream_mv_counts: TableUpstreamMvCountMap, + mut definitions: TableDefinitionMap, + version_stats: HummockVersionStats, + mut finished_notifiers: TableNotifierMap, + mut table_fragment_map: TableFragmentMap, + fragment_manager: FragmentManagerRef, + ) -> Self { + let mut actor_map = HashMap::new(); + let mut progress_map = HashMap::new(); + let table_map: HashMap<_, Vec> = table_map.into(); + for (creating_table_id, actors) in table_map { + // 1. Recover `ChainState` in the tracker. 
+ let mut states = HashMap::new(); + for actor in actors { + actor_map.insert(actor, creating_table_id); + states.insert(actor, ChainState::ConsumingUpstream(Epoch(0), 0)); + } + let upstream_mv_count = upstream_mv_counts.remove(&creating_table_id).unwrap(); + let upstream_total_key_count = upstream_mv_count + .iter() + .map(|(upstream_mv, count)| { + *count as u64 + * version_stats + .table_stats + .get(&upstream_mv.table_id) + .map_or(0, |stat| stat.total_key_count as u64) + }) + .sum(); + let definition = definitions.remove(&creating_table_id).unwrap(); + let progress = Progress { + states, + done_count: 0, // Fill only after first barrier pass + upstream_mv_count, + upstream_total_key_count, + consumed_rows: 0, // Fill only after first barrier pass + definition, + }; + let tracking_job = TrackingJob::Recovered(RecoveredTrackingJob { + fragments: table_fragment_map.remove(&creating_table_id).unwrap(), + finished: finished_notifiers.remove(&creating_table_id).unwrap(), + fragment_manager: fragment_manager.clone(), + }); + progress_map.insert(creating_table_id, (progress, tracking_job)); + } + Self { + progress_map, + actor_map, + } + } + pub fn new() -> Self { Self { progress_map: Default::default(), @@ -169,9 +308,9 @@ impl CreateMviewProgressTracker { pub fn gen_ddl_progress(&self) -> Vec { self.progress_map - .values() - .map(|(x, _)| DdlProgress { - id: x.creating_mv_id.table_id as u64, + .iter() + .map(|(table_id, (x, _))| DdlProgress { + id: table_id.table_id as u64, statement: x.definition.clone(), progress: format!("{:.2}%", x.calculate_progress() * 100.0), }) @@ -184,7 +323,7 @@ impl CreateMviewProgressTracker { pub fn find_cancelled_command( &mut self, actors_to_cancel: HashSet, - ) -> Option { + ) -> Option { let epochs = actors_to_cancel .into_iter() .map(|actor_id| self.actor_map.get(&actor_id)) @@ -206,16 +345,11 @@ impl CreateMviewProgressTracker { &mut self, command: TrackingCommand, version_stats: &HummockVersionStats, - ) -> Option { + ) -> 
Option { let actors = command.context.actors_to_track(); if actors.is_empty() { // The command can be finished immediately. - return Some(command); - } - - let ddl_epoch = command.context.curr_epoch.value(); - for &actor in &actors { - self.actor_map.insert(actor, ddl_epoch); + return Some(TrackingJob::New(command)); } let (creating_mv_id, upstream_mv_count, upstream_total_key_count, definition) = @@ -259,14 +393,19 @@ impl CreateMviewProgressTracker { unreachable!("Must be CreateStreamingJob."); }; + for &actor in &actors { + self.actor_map.insert(actor, creating_mv_id); + } + let progress = Progress::new( actors, - creating_mv_id, upstream_mv_count, upstream_total_key_count, definition, ); - let old = self.progress_map.insert(ddl_epoch, (progress, command)); + let old = self + .progress_map + .insert(creating_mv_id, (progress, TrackingJob::New(command))); assert!(old.is_none()); None } @@ -278,22 +417,29 @@ impl CreateMviewProgressTracker { &mut self, progress: &CreateMviewProgress, version_stats: &HummockVersionStats, - ) -> Option { + ) -> Option { let actor = progress.chain_actor_id; - let Some(epoch) = self.actor_map.get(&actor).copied() else { - panic!( - "no tracked progress for actor {}, is it already finished?", + let Some(table_id) = self.actor_map.get(&actor).copied() else { + // On restart, backfill will ALWAYS notify CreateMviewProgressTracker, + // even if backfill is finished on recovery. + // This is because we don't know if only this actor is finished, + // OR the entire stream job is finished. + // For the first case, we must notify meta. + // For the second case, we can still notify meta, but ignore it here. 
+ tracing::info!( + "no tracked progress for actor {}, the stream job could already be finished", actor ); + return None; }; let new_state = if progress.done { - ChainState::Done + ChainState::Done(progress.consumed_rows) } else { ChainState::ConsumingUpstream(progress.consumed_epoch.into(), progress.consumed_rows) }; - match self.progress_map.entry(epoch) { + match self.progress_map.entry(table_id) { Entry::Occupied(mut o) => { let progress = &mut o.get_mut().0; @@ -301,6 +447,7 @@ impl CreateMviewProgressTracker { .upstream_mv_count .iter() .map(|(upstream_mv, count)| { + assert_ne!(*count, 0); *count as u64 * version_stats .table_stats @@ -312,7 +459,10 @@ impl CreateMviewProgressTracker { progress.update(actor, new_state, upstream_total_key_count); if progress.is_done() { - tracing::debug!("all actors done for creating mview with epoch {}!", epoch); + tracing::debug!( + "all actors done for creating mview with table_id {}!", + table_id + ); // Clean-up the mapping from actors to DDL epoch. for actor in o.get().0.actors() { diff --git a/src/meta/src/barrier/recovery.rs b/src/meta/src/barrier/recovery.rs index bce901cd6f459..3e319f0e69a52 100644 --- a/src/meta/src/barrier/recovery.rs +++ b/src/meta/src/barrier/recovery.rs @@ -12,12 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::{BTreeSet, HashSet}; +use std::collections::{BTreeSet, HashMap, HashSet}; use std::sync::Arc; use std::time::{Duration, Instant}; +use anyhow::anyhow; use futures::future::try_join_all; use itertools::Itertools; +use risingwave_common::catalog::TableId; use risingwave_pb::common::ActorInfo; use risingwave_pb::meta::PausedReason; use risingwave_pb::stream_plan::barrier::{BarrierKind, Mutation}; @@ -25,6 +27,7 @@ use risingwave_pb::stream_plan::AddMutation; use risingwave_pb::stream_service::{ BroadcastActorInfoTableRequest, BuildActorsRequest, ForceStopActorsRequest, UpdateActorsRequest, }; +use tokio::sync::oneshot; use tokio_retry::strategy::{jitter, ExponentialBackoff}; use tracing::{debug, warn, Instrument}; use uuid::Uuid; @@ -32,6 +35,8 @@ use uuid::Uuid; use super::TracedEpoch; use crate::barrier::command::CommandContext; use crate::barrier::info::BarrierActorInfo; +use crate::barrier::notifier::Notifier; +use crate::barrier::progress::CreateMviewProgressTracker; use crate::barrier::{CheckpointControl, Command, GlobalBarrierManager}; use crate::manager::WorkerId; use crate::model::{BarrierManagerState, MigrationPlan}; @@ -60,22 +65,30 @@ impl GlobalBarrierManager { .await } + /// Please look at `CatalogManager::clean_dirty_tables` for more details. + /// This should only be called for bootstrap recovery. + async fn clean_dirty_tables(&self) -> MetaResult<()> { + let fragment_manager = self.fragment_manager.clone(); + self.catalog_manager + .clean_dirty_tables(fragment_manager) + .await?; + Ok(()) + } + /// Clean up all dirty streaming jobs. 
async fn clean_dirty_fragments(&self) -> MetaResult<()> { let stream_job_ids = self.catalog_manager.list_stream_job_ids().await?; let to_drop_table_fragments = self .fragment_manager - .list_dirty_table_fragments(|tf| { - !stream_job_ids.contains(&tf.table_id().table_id) || !tf.is_created() - }) + .list_dirty_table_fragments(|tf| !stream_job_ids.contains(&tf.table_id().table_id)) .await; - let to_drop_streaming_ids = to_drop_table_fragments .iter() .map(|t| t.table_id()) .collect(); debug!("clean dirty table fragments: {:?}", to_drop_streaming_ids); + self.fragment_manager .drop_table_fragments_vec(&to_drop_streaming_ids) .await?; @@ -86,7 +99,7 @@ impl GlobalBarrierManager { &to_drop_table_fragments ) .await.inspect_err(|e| - tracing::warn!( + warn!( "Failed to unregister compaction group for {:#?}. They will be cleaned up on node restart. {:#?}", to_drop_table_fragments, e) @@ -100,6 +113,101 @@ impl GlobalBarrierManager { Ok(()) } + async fn recover_background_mv_progress(&self) -> MetaResult<()> { + let creating_tables = self.catalog_manager.list_creating_background_mvs().await; + let creating_table_ids = creating_tables + .iter() + .map(|t| TableId { table_id: t.id }) + .collect_vec(); + + let mut senders = HashMap::new(); + let mut receivers = Vec::new(); + for table_id in creating_table_ids.iter().copied() { + let (finished_tx, finished_rx) = oneshot::channel(); + senders.insert( + table_id, + Notifier { + finished: Some(finished_tx), + ..Default::default() + }, + ); + + let fragments = self + .fragment_manager + .select_table_fragments_by_table_id(&table_id) + .await?; + let internal_table_ids = fragments.internal_table_ids(); + let internal_tables = self.catalog_manager.get_tables(&internal_table_ids).await; + let table = self.catalog_manager.get_tables(&[table_id.table_id]).await; + assert_eq!(table.len(), 1, "should only have 1 materialized table"); + let table = table.into_iter().next().unwrap(); + receivers.push((table, internal_tables, finished_rx)); 
+ } + + let table_map = self + .fragment_manager + .get_table_id_actor_mapping(&creating_table_ids) + .await; + let table_fragment_map = self + .fragment_manager + .get_table_id_table_fragment_map(&creating_table_ids) + .await?; + let upstream_mv_counts = self + .fragment_manager + .get_upstream_relation_counts(&creating_table_ids) + .await; + let definitions: HashMap<_, _> = creating_tables + .into_iter() + .map(|t| (TableId { table_id: t.id }, t.definition)) + .collect(); + let version_stats = self.hummock_manager.get_version_stats().await; + // If failed, enter recovery mode. + { + let mut tracker = self.tracker.lock().await; + *tracker = CreateMviewProgressTracker::recover( + table_map.into(), + upstream_mv_counts.into(), + definitions.into(), + version_stats, + senders.into(), + table_fragment_map.into(), + self.fragment_manager.clone(), + ); + } + for (table, internal_tables, finished) in receivers { + let catalog_manager = self.catalog_manager.clone(); + tokio::spawn(async move { + let res: MetaResult<()> = try { + tracing::debug!("recovering stream job {}", table.id); + finished + .await + .map_err(|e| anyhow!("failed to finish command: {}", e))?; + + tracing::debug!("finished stream job {}", table.id); + // Once notified that job is finished we need to notify frontend. + // and mark catalog as created and commit to meta. + // both of these are done by catalog manager. + catalog_manager + .finish_create_table_procedure(internal_tables, table.clone()) + .await?; + tracing::debug!("notified frontend for stream job {}", table.id); + }; + if let Err(e) = res.as_ref() { + tracing::error!( + "stream job {} interrupted, will retry after recovery: {e:?}", + table.id + ); + // NOTE(kwannoel): We should not cleanup stream jobs, + // we don't know if it's just due to CN killed, + // or the job has actually failed. + // Users have to manually cancel the stream jobs, + // if they want to clean it. 
+ } + }); + } + Ok(()) + } + /// Recovery the whole cluster from the latest epoch. /// /// If `paused_reason` is `Some`, all data sources (including connectors and DMLs) will be @@ -107,7 +215,7 @@ impl GlobalBarrierManager { /// the cluster or `risectl` command. Used for debugging purpose. /// /// Returns the new state of the barrier manager after recovery. - pub(crate) async fn recovery( + pub async fn recovery( &self, prev_epoch: TracedEpoch, paused_reason: Option, @@ -118,12 +226,23 @@ impl GlobalBarrierManager { .await; tracing::info!("recovery start!"); + self.clean_dirty_tables() + .await + .expect("clean dirty tables should not fail"); self.clean_dirty_fragments() .await .expect("clean dirty fragments"); + self.sink_manager.reset().await; let retry_strategy = Self::get_retry_strategy(); + // Mview progress needs to be recovered. + tracing::info!("recovering mview progress"); + self.recover_background_mv_progress() + .await + .expect("recover mview progress should not fail"); + tracing::info!("recovered mview progress"); + // We take retry into consideration because this is the latency user sees for a cluster to // get recovered. let recovery_timer = self.metrics.recovery_latency.start_timer(); @@ -172,6 +291,8 @@ impl GlobalBarrierManager { // Inject the `Initial` barrier to initialize all executors. 
let command_ctx = Arc::new(CommandContext::new( self.fragment_manager.clone(), + self.catalog_manager.clone(), + self.hummock_manager.clone(), self.env.stream_client_pool_ref(), info, prev_epoch.clone(), @@ -204,7 +325,7 @@ impl GlobalBarrierManager { warn!(err = ?err, "post_collect failed"); Err(err) } else { - Ok((new_epoch, response)) + Ok((new_epoch.clone(), response)) } } Err(err) => { diff --git a/src/meta/src/barrier/schedule.rs b/src/meta/src/barrier/schedule.rs index 7c9fefd15606b..c4718d97d40f6 100644 --- a/src/meta/src/barrier/schedule.rs +++ b/src/meta/src/barrier/schedule.rs @@ -393,7 +393,7 @@ impl ScheduledBarriers { } /// Make the `checkpoint` of the next barrier must be true - pub(crate) fn force_checkpoint_in_next_barrier(&self) { + pub fn force_checkpoint_in_next_barrier(&self) { self.inner.force_checkpoint.store(true, Ordering::Relaxed) } diff --git a/src/meta/src/controller/catalog.rs b/src/meta/src/controller/catalog.rs new file mode 100644 index 0000000000000..cb37307384aa2 --- /dev/null +++ b/src/meta/src/controller/catalog.rs @@ -0,0 +1,887 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::iter; + +use itertools::Itertools; +use risingwave_common::bail; +use risingwave_common::catalog::{DEFAULT_SCHEMA_NAME, SYSTEM_SCHEMAS}; +use risingwave_pb::catalog::{ + PbConnection, PbDatabase, PbFunction, PbIndex, PbSchema, PbSink, PbSource, PbTable, PbView, +}; +use risingwave_pb::meta::relation::PbRelationInfo; +use risingwave_pb::meta::subscribe_response::{ + Info as NotificationInfo, Operation as NotificationOperation, +}; +use risingwave_pb::meta::{PbRelation, PbRelationGroup}; +use sea_orm::{ + ActiveModelTrait, ActiveValue, ColumnTrait, DatabaseConnection, DatabaseTransaction, + EntityTrait, QueryFilter, QuerySelect, TransactionTrait, +}; +use tokio::sync::RwLock; + +use crate::controller::rename::{alter_relation_rename, alter_relation_rename_refs}; +use crate::controller::utils::{ + check_connection_name_duplicate, check_function_signature_duplicate, + check_relation_name_duplicate, check_schema_name_duplicate, ensure_object_id, + ensure_object_not_refer, ensure_schema_empty, ensure_user_id, get_referring_objects, + get_referring_objects_cascade, PartialObject, +}; +use crate::controller::ObjectModel; +use crate::manager::{MetaSrvEnv, NotificationVersion}; +use crate::model_v2::object::ObjectType; +use crate::model_v2::prelude::*; +use crate::model_v2::{ + connection, database, function, index, object, object_dependency, schema, sink, source, table, + view, ConnectionId, DatabaseId, FunctionId, ObjectId, PrivateLinkService, SchemaId, SourceId, + TableId, UserId, +}; +use crate::rpc::ddl_controller::DropMode; +use crate::{MetaError, MetaResult}; + +/// `CatalogController` is the controller for catalog related operations, including database, schema, table, view, etc. 
+pub struct CatalogController { + env: MetaSrvEnv, + inner: RwLock, +} + +#[derive(Clone, Default)] +pub struct ReleaseContext { + streaming_jobs: Vec, + source_ids: Vec, + connections: Vec, +} + +impl CatalogController { + pub fn new(env: MetaSrvEnv) -> MetaResult { + let meta_store = env + .sql_meta_store() + .expect("sql meta store is not initialized"); + Ok(Self { + env, + inner: RwLock::new(CatalogControllerInner { + db: meta_store.conn, + }), + }) + } +} + +struct CatalogControllerInner { + db: DatabaseConnection, +} + +impl CatalogController { + async fn notify_frontend( + &self, + operation: NotificationOperation, + info: NotificationInfo, + ) -> NotificationVersion { + self.env + .notification_manager() + .notify_frontend(operation, info) + .await + } + + async fn notify_frontend_relation_info( + &self, + operation: NotificationOperation, + relation_info: PbRelationInfo, + ) -> NotificationVersion { + self.env + .notification_manager() + .notify_frontend_relation_info(operation, relation_info) + .await + } +} + +impl CatalogController { + pub fn snapshot(&self) -> MetaResult<()> { + todo!("snapshot") + } + + async fn create_object( + txn: &DatabaseTransaction, + obj_type: ObjectType, + owner_id: UserId, + database_id: Option, + schema_id: Option, + ) -> MetaResult { + let active_db = object::ActiveModel { + oid: Default::default(), + obj_type: ActiveValue::Set(obj_type), + owner_id: ActiveValue::Set(owner_id), + schema_id: ActiveValue::Set(schema_id), + database_id: ActiveValue::Set(database_id), + initialized_at: Default::default(), + created_at: Default::default(), + }; + Ok(active_db.insert(txn).await?) 
+ } + + pub async fn create_database(&self, db: PbDatabase) -> MetaResult { + let inner = self.inner.write().await; + let owner_id = db.owner; + let txn = inner.db.begin().await?; + ensure_user_id(owner_id, &txn).await?; + + let db_obj = Self::create_object(&txn, ObjectType::Database, owner_id, None, None).await?; + let mut db: database::ActiveModel = db.into(); + db.database_id = ActiveValue::Set(db_obj.oid); + let db = db.insert(&txn).await?; + + let mut schemas = vec![]; + for schema_name in iter::once(DEFAULT_SCHEMA_NAME).chain(SYSTEM_SCHEMAS) { + let schema_obj = + Self::create_object(&txn, ObjectType::Schema, owner_id, Some(db_obj.oid), None) + .await?; + let schema = schema::ActiveModel { + schema_id: ActiveValue::Set(schema_obj.oid), + name: ActiveValue::Set(schema_name.into()), + }; + let schema = schema.insert(&txn).await?; + schemas.push(ObjectModel(schema, schema_obj).into()); + } + txn.commit().await?; + + let mut version = self + .notify_frontend( + NotificationOperation::Add, + NotificationInfo::Database(ObjectModel(db, db_obj).into()), + ) + .await; + for schema in schemas { + version = self + .notify_frontend(NotificationOperation::Add, NotificationInfo::Schema(schema)) + .await; + } + + Ok(version) + } + + pub async fn drop_database( + &self, + database_id: DatabaseId, + ) -> MetaResult<(ReleaseContext, NotificationVersion)> { + let inner = self.inner.write().await; + let txn = inner.db.begin().await?; + ensure_object_id(ObjectType::Database, database_id, &txn).await?; + + let streaming_jobs: Vec = Object::find() + .select_only() + .column(object::Column::Oid) + .filter( + object::Column::DatabaseId + .eq(Some(database_id)) + .and(object::Column::ObjType.is_in([ObjectType::Table, ObjectType::Sink])), + ) + .into_tuple() + .all(&txn) + .await?; + + let source_ids: Vec = Object::find() + .select_only() + .column(object::Column::Oid) + .filter( + object::Column::DatabaseId + .eq(Some(database_id)) + 
.and(object::Column::ObjType.eq(ObjectType::Source)), + ) + .into_tuple() + .all(&txn) + .await?; + + let connections = Connection::find() + .inner_join(Object) + .filter(object::Column::DatabaseId.eq(Some(database_id))) + .all(&txn) + .await? + .into_iter() + .map(|conn| conn.info) + .collect_vec(); + + // The schema and objects in the database will be delete cascade. + let res = Object::delete_by_id(database_id).exec(&txn).await?; + if res.rows_affected == 0 { + return Err(MetaError::catalog_id_not_found("database", database_id)); + } + + txn.commit().await?; + + let version = self + .notify_frontend( + NotificationOperation::Delete, + NotificationInfo::Database(PbDatabase { + id: database_id, + ..Default::default() + }), + ) + .await; + Ok(( + ReleaseContext { + streaming_jobs, + source_ids, + connections, + }, + version, + )) + } + + pub async fn create_schema(&self, schema: PbSchema) -> MetaResult { + let inner = self.inner.write().await; + let owner_id = schema.owner; + let txn = inner.db.begin().await?; + ensure_user_id(owner_id, &txn).await?; + ensure_object_id(ObjectType::Database, schema.database_id, &txn).await?; + check_schema_name_duplicate(&schema.name, schema.database_id, &txn).await?; + + let schema_obj = Self::create_object( + &txn, + ObjectType::Schema, + owner_id, + Some(schema.database_id), + None, + ) + .await?; + let mut schema: schema::ActiveModel = schema.into(); + schema.schema_id = ActiveValue::Set(schema_obj.oid); + let schema = schema.insert(&txn).await?; + txn.commit().await?; + + let version = self + .notify_frontend( + NotificationOperation::Add, + NotificationInfo::Schema(ObjectModel(schema, schema_obj).into()), + ) + .await; + Ok(version) + } + + pub async fn drop_schema( + &self, + schema_id: SchemaId, + drop_mode: DropMode, + ) -> MetaResult { + let inner = self.inner.write().await; + let schema_obj = Object::find_by_id(schema_id) + .one(&inner.db) + .await? 
+ .ok_or_else(|| MetaError::catalog_id_not_found("schema", schema_id))?; + if drop_mode == DropMode::Restrict { + ensure_schema_empty(schema_id, &inner.db).await?; + } + + let res = Object::delete(object::ActiveModel { + oid: ActiveValue::Set(schema_id), + ..Default::default() + }) + .exec(&inner.db) + .await?; + if res.rows_affected == 0 { + return Err(MetaError::catalog_id_not_found("schema", schema_id)); + } + + // todo: update user privileges accordingly. + let version = self + .notify_frontend( + NotificationOperation::Delete, + NotificationInfo::Schema(PbSchema { + id: schema_id, + database_id: schema_obj.database_id.unwrap(), + ..Default::default() + }), + ) + .await; + Ok(version) + } + + pub async fn create_function( + &self, + mut pb_function: PbFunction, + ) -> MetaResult { + let inner = self.inner.write().await; + let owner_id = pb_function.owner; + let txn = inner.db.begin().await?; + ensure_user_id(owner_id, &txn).await?; + ensure_object_id(ObjectType::Database, pb_function.database_id, &txn).await?; + ensure_object_id(ObjectType::Schema, pb_function.schema_id, &txn).await?; + check_function_signature_duplicate(&pb_function, &txn).await?; + + let function_obj = Self::create_object( + &txn, + ObjectType::Function, + owner_id, + Some(pb_function.database_id), + Some(pb_function.schema_id), + ) + .await?; + pb_function.id = function_obj.oid; + let function: function::ActiveModel = pb_function.clone().into(); + function.insert(&txn).await?; + txn.commit().await?; + + let version = self + .notify_frontend( + NotificationOperation::Add, + NotificationInfo::Function(pb_function), + ) + .await; + Ok(version) + } + + pub async fn drop_function(&self, function_id: FunctionId) -> MetaResult { + let inner = self.inner.write().await; + let function_obj = Object::find_by_id(function_id) + .one(&inner.db) + .await? 
+ .ok_or_else(|| MetaError::catalog_id_not_found("function", function_id))?; + ensure_object_not_refer(ObjectType::Function, function_id, &inner.db).await?; + + let res = Object::delete_by_id(function_id).exec(&inner.db).await?; + if res.rows_affected == 0 { + return Err(MetaError::catalog_id_not_found("function", function_id)); + } + + let version = self + .notify_frontend( + NotificationOperation::Delete, + NotificationInfo::Function(PbFunction { + id: function_id, + schema_id: function_obj.schema_id.unwrap(), + database_id: function_obj.database_id.unwrap(), + ..Default::default() + }), + ) + .await; + Ok(version) + } + + pub async fn create_connection( + &self, + mut pb_connection: PbConnection, + ) -> MetaResult { + let inner = self.inner.write().await; + let owner_id = pb_connection.owner; + let txn = inner.db.begin().await?; + ensure_user_id(owner_id, &txn).await?; + ensure_object_id(ObjectType::Database, pb_connection.database_id, &txn).await?; + ensure_object_id(ObjectType::Schema, pb_connection.schema_id, &txn).await?; + check_connection_name_duplicate(&pb_connection, &txn).await?; + + let conn_obj = Self::create_object( + &txn, + ObjectType::Connection, + owner_id, + Some(pb_connection.database_id), + Some(pb_connection.schema_id), + ) + .await?; + pb_connection.id = conn_obj.oid; + let connection: connection::ActiveModel = pb_connection.clone().into(); + connection.insert(&txn).await?; + + txn.commit().await?; + + let version = self + .notify_frontend( + NotificationOperation::Add, + NotificationInfo::Connection(pb_connection), + ) + .await; + Ok(version) + } + + pub async fn get_connection_by_id( + &self, + connection_id: ConnectionId, + ) -> MetaResult { + let inner = self.inner.read().await; + let (conn, obj) = Connection::find_by_id(connection_id) + .find_also_related(Object) + .one(&inner.db) + .await? 
+ .ok_or_else(|| MetaError::catalog_id_not_found("connection", connection_id))?; + + Ok(ObjectModel(conn, obj.unwrap()).into()) + } + + pub async fn drop_connection( + &self, + connection_id: ConnectionId, + ) -> MetaResult { + let inner = self.inner.write().await; + let connection_obj = Object::find_by_id(connection_id) + .one(&inner.db) + .await? + .ok_or_else(|| MetaError::catalog_id_not_found("connection", connection_id))?; + ensure_object_not_refer(ObjectType::Connection, connection_id, &inner.db).await?; + + let res = Object::delete_by_id(connection_id).exec(&inner.db).await?; + if res.rows_affected == 0 { + return Err(MetaError::catalog_id_not_found("connection", connection_id)); + } + + let version = self + .notify_frontend( + NotificationOperation::Delete, + NotificationInfo::Connection(PbConnection { + id: connection_id, + schema_id: connection_obj.schema_id.unwrap(), + database_id: connection_obj.database_id.unwrap(), + ..Default::default() + }), + ) + .await; + Ok(version) + } + + pub async fn create_view(&self, mut pb_view: PbView) -> MetaResult { + let inner = self.inner.write().await; + let owner_id = pb_view.owner; + let txn = inner.db.begin().await?; + ensure_user_id(owner_id, &txn).await?; + ensure_object_id(ObjectType::Database, pb_view.database_id, &txn).await?; + ensure_object_id(ObjectType::Schema, pb_view.schema_id, &txn).await?; + check_relation_name_duplicate(&pb_view.name, pb_view.database_id, pb_view.schema_id, &txn) + .await?; + + let view_obj = Self::create_object( + &txn, + ObjectType::View, + owner_id, + Some(pb_view.database_id), + Some(pb_view.schema_id), + ) + .await?; + pb_view.id = view_obj.oid; + let view: view::ActiveModel = pb_view.clone().into(); + view.insert(&txn).await?; + + // todo: change `dependent_relations` to `dependent_objects`, which should includes connection and function as well. + // todo: shall we need to check existence of them Or let database handle it by FOREIGN KEY constraint. 
+ for obj_id in &pb_view.dependent_relations { + object_dependency::ActiveModel { + oid: ActiveValue::Set(*obj_id), + used_by: ActiveValue::Set(view_obj.oid), + ..Default::default() + } + .insert(&txn) + .await?; + } + + txn.commit().await?; + + let version = self + .notify_frontend_relation_info( + NotificationOperation::Add, + PbRelationInfo::View(pb_view), + ) + .await; + Ok(version) + } + + pub async fn drop_relation( + &self, + object_type: ObjectType, + object_id: ObjectId, + drop_mode: DropMode, + ) -> MetaResult<(ReleaseContext, NotificationVersion)> { + let inner = self.inner.write().await; + let txn = inner.db.begin().await?; + let obj: PartialObject = Object::find_by_id(object_id) + .into_partial_model() + .one(&txn) + .await? + .ok_or_else(|| MetaError::catalog_id_not_found(object_type.as_str(), object_id))?; + assert_eq!(obj.obj_type, object_type); + + let mut to_drop_objects = match drop_mode { + DropMode::Cascade => get_referring_objects_cascade(object_id, &txn).await?, + DropMode::Restrict => { + ensure_object_not_refer(object_type, object_id, &txn).await?; + vec![] + } + }; + assert!( + to_drop_objects.iter().all(|obj| matches!( + obj.obj_type, + ObjectType::Table | ObjectType::Index | ObjectType::Sink | ObjectType::View + )), + "only these objects will depends on others" + ); + to_drop_objects.push(obj); + + let to_drop_table_ids = to_drop_objects + .iter() + .filter(|obj| obj.obj_type == ObjectType::Table) + .map(|obj| obj.oid); + let mut to_drop_streaming_jobs = to_drop_objects + .iter() + .filter(|obj| obj.obj_type == ObjectType::Table || obj.obj_type == ObjectType::Sink) + .map(|obj| obj.oid) + .collect_vec(); + // todo: record index dependency info in the object dependency table. + let to_drop_index_ids = to_drop_objects + .iter() + .filter(|obj| obj.obj_type == ObjectType::Index) + .map(|obj| obj.oid) + .collect_vec(); + + // Add associated sources. 
+ let mut to_drop_source_ids: Vec = Table::find() + .select_only() + .column(table::Column::OptionalAssociatedSourceId) + .filter( + table::Column::TableId + .is_in(to_drop_table_ids) + .and(table::Column::OptionalAssociatedSourceId.is_not_null()), + ) + .into_tuple() + .all(&txn) + .await?; + let to_drop_source_objs: Vec = Object::find() + .filter(object::Column::Oid.is_in(to_drop_source_ids.clone())) + .into_partial_model() + .all(&txn) + .await?; + to_drop_objects.extend(to_drop_source_objs.clone()); + if object_type == ObjectType::Source { + to_drop_source_ids.push(object_id); + } + + // add internal tables. + let index_table_ids: Vec = Index::find() + .select_only() + .column(index::Column::IndexTableId) + .filter(index::Column::IndexId.is_in(to_drop_index_ids)) + .into_tuple() + .all(&txn) + .await?; + to_drop_streaming_jobs.extend(index_table_ids); + let to_drop_internal_table_objs: Vec = Object::find() + .filter(object::Column::Oid.is_in(to_drop_streaming_jobs.clone())) + .into_partial_model() + .all(&txn) + .await?; + to_drop_objects.extend(to_drop_internal_table_objs); + + // delete all in to_drop_objects. + let res = Object::delete_many() + .filter(object::Column::Oid.is_in(to_drop_objects.iter().map(|obj| obj.oid))) + .exec(&txn) + .await?; + if res.rows_affected == 0 { + return Err(MetaError::catalog_id_not_found( + object_type.as_str(), + object_id, + )); + } + + // notify about them. 
+ let relations = to_drop_objects + .into_iter() + .map(|obj| match obj.obj_type { + ObjectType::Table => PbRelation { + relation_info: Some(PbRelationInfo::Table(PbTable { + id: obj.oid, + schema_id: obj.schema_id.unwrap(), + database_id: obj.database_id.unwrap(), + ..Default::default() + })), + }, + ObjectType::Source => PbRelation { + relation_info: Some(PbRelationInfo::Source(PbSource { + id: obj.oid, + schema_id: obj.schema_id.unwrap(), + database_id: obj.database_id.unwrap(), + ..Default::default() + })), + }, + ObjectType::Sink => PbRelation { + relation_info: Some(PbRelationInfo::Sink(PbSink { + id: obj.oid, + schema_id: obj.schema_id.unwrap(), + database_id: obj.database_id.unwrap(), + ..Default::default() + })), + }, + ObjectType::View => PbRelation { + relation_info: Some(PbRelationInfo::View(PbView { + id: obj.oid, + schema_id: obj.schema_id.unwrap(), + database_id: obj.database_id.unwrap(), + ..Default::default() + })), + }, + ObjectType::Index => PbRelation { + relation_info: Some(PbRelationInfo::Index(PbIndex { + id: obj.oid, + schema_id: obj.schema_id.unwrap(), + database_id: obj.database_id.unwrap(), + ..Default::default() + })), + }, + _ => unreachable!("only relations will be dropped."), + }) + .collect_vec(); + let version = self + .notify_frontend( + NotificationOperation::Delete, + NotificationInfo::RelationGroup(PbRelationGroup { relations }), + ) + .await; + + Ok(( + ReleaseContext { + streaming_jobs: to_drop_streaming_jobs, + source_ids: to_drop_source_ids, + connections: vec![], + }, + version, + )) + } + + pub async fn alter_relation_name( + &self, + object_type: ObjectType, + object_id: ObjectId, + object_name: &str, + ) -> MetaResult { + let inner = self.inner.write().await; + let txn = inner.db.begin().await?; + let obj: PartialObject = Object::find_by_id(object_id) + .into_partial_model() + .one(&txn) + .await? 
+ .ok_or_else(|| MetaError::catalog_id_not_found(object_type.as_str(), object_id))?; + assert_eq!(obj.obj_type, object_type); + check_relation_name_duplicate( + object_name, + obj.database_id.unwrap(), + obj.schema_id.unwrap(), + &txn, + ) + .await?; + + let mut to_update_relations = vec![]; + // rename relation. + macro_rules! rename_relation { + ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ + let (mut relation, obj) = $entity::find_by_id($object_id) + .find_also_related(Object) + .one(&txn) + .await? + .unwrap(); + let old_name = relation.name.clone(); + relation.name = object_name.into(); + relation.definition = alter_relation_rename(&relation.definition, object_name); + let active_model = $table::ActiveModel { + $identity: ActiveValue::Set(relation.$identity), + name: ActiveValue::Set(object_name.into()), + definition: ActiveValue::Set(relation.definition.clone()), + ..Default::default() + }; + active_model.update(&txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::$entity( + ObjectModel(relation, obj.unwrap()).into(), + )), + }); + old_name + }}; + } + + let old_name = match object_type { + ObjectType::Table => rename_relation!(Table, table, table_id, object_id), + ObjectType::Source => rename_relation!(Source, source, source_id, object_id), + ObjectType::Sink => rename_relation!(Sink, sink, sink_id, object_id), + ObjectType::View => rename_relation!(View, view, view_id, object_id), + ObjectType::Index => { + let (mut index, obj) = Index::find_by_id(object_id) + .find_also_related(Object) + .one(&txn) + .await? + .unwrap(); + index.name = object_name.into(); + let index_table_id = index.index_table_id; + + // the name of index and its associated table is the same. 
+ let active_model = index::ActiveModel { + index_id: ActiveValue::Set(index.index_id), + name: ActiveValue::Set(object_name.into()), + ..Default::default() + }; + active_model.update(&txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::Index( + ObjectModel(index, obj.unwrap()).into(), + )), + }); + rename_relation!(Table, table, table_id, index_table_id) + } + _ => unreachable!("only relation name can be altered."), + }; + + // rename referring relation name. + macro_rules! rename_relation_ref { + ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ + let (mut relation, obj) = $entity::find_by_id($object_id) + .find_also_related(Object) + .one(&txn) + .await? + .unwrap(); + relation.definition = + alter_relation_rename_refs(&relation.definition, &old_name, object_name); + let active_model = $table::ActiveModel { + $identity: ActiveValue::Set(relation.$identity), + definition: ActiveValue::Set(relation.definition.clone()), + ..Default::default() + }; + active_model.update(&txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::$entity( + ObjectModel(relation, obj.unwrap()).into(), + )), + }); + }}; + } + let objs = get_referring_objects(object_id, &txn).await?; + for obj in objs { + match obj.obj_type { + ObjectType::Table => rename_relation_ref!(Table, table, table_id, obj.oid), + ObjectType::Sink => rename_relation_ref!(Sink, sink, sink_id, obj.oid), + ObjectType::View => rename_relation_ref!(View, view, view_id, obj.oid), + ObjectType::Index => { + let index_table_id: Option = Index::find_by_id(obj.oid) + .select_only() + .column(index::Column::IndexTableId) + .into_tuple() + .one(&txn) + .await?; + rename_relation_ref!(Table, table, table_id, index_table_id.unwrap()); + } + _ => bail!("only table, sink, view and index depend on other objects."), + } + } + txn.commit().await?; + + let version = self + .notify_frontend( + NotificationOperation::Update, + 
NotificationInfo::RelationGroup(PbRelationGroup { + relations: to_update_relations, + }), + ) + .await; + + Ok(version) + } +} + +#[cfg(test)] +#[cfg(not(madsim))] +mod tests { + use risingwave_common::catalog::DEFAULT_SUPER_USER_ID; + + use super::*; + + const TEST_DATABASE_ID: DatabaseId = 1; + const TEST_SCHEMA_ID: SchemaId = 2; + const TEST_OWNER_ID: UserId = 1; + + #[tokio::test] + async fn test_create_database() -> MetaResult<()> { + let mgr = CatalogController::new(MetaSrvEnv::for_test().await)?; + let db = PbDatabase { + name: "test".to_string(), + owner: DEFAULT_SUPER_USER_ID, + ..Default::default() + }; + mgr.create_database(db).await?; + + let db = Database::find() + .filter(database::Column::Name.eq("test")) + .one(&mgr.inner.read().await.db) + .await? + .unwrap(); + mgr.drop_database(db.database_id).await?; + Ok(()) + } + + #[tokio::test] + async fn test_create_view() -> MetaResult<()> { + let mgr = CatalogController::new(MetaSrvEnv::for_test().await)?; + let pb_view = PbView { + schema_id: TEST_SCHEMA_ID, + database_id: TEST_DATABASE_ID, + name: "view".to_string(), + owner: TEST_OWNER_ID, + sql: "CREATE VIEW view AS SELECT 1".to_string(), + ..Default::default() + }; + mgr.create_view(pb_view.clone()).await?; + assert!(mgr.create_view(pb_view).await.is_err()); + + let view = View::find().one(&mgr.inner.read().await.db).await?.unwrap(); + mgr.drop_relation(ObjectType::View, view.view_id, DropMode::Cascade) + .await?; + + Ok(()) + } + + #[tokio::test] + async fn test_create_function() -> MetaResult<()> { + let mgr = CatalogController::new(MetaSrvEnv::for_test().await)?; + let return_type = risingwave_pb::data::DataType { + type_name: risingwave_pb::data::data_type::TypeName::Int32 as _, + ..Default::default() + }; + let pb_function = PbFunction { + schema_id: TEST_SCHEMA_ID, + database_id: TEST_DATABASE_ID, + name: "test_function".to_string(), + owner: TEST_OWNER_ID, + arg_types: vec![], + return_type: Some(return_type.clone()), + language: 
"python".to_string(), + kind: Some(risingwave_pb::catalog::function::Kind::Scalar( + Default::default(), + )), + ..Default::default() + }; + mgr.create_function(pb_function.clone()).await?; + assert!(mgr.create_function(pb_function).await.is_err()); + + let function = Function::find() + .inner_join(Object) + .filter( + object::Column::DatabaseId + .eq(TEST_DATABASE_ID) + .and(object::Column::SchemaId.eq(TEST_SCHEMA_ID)) + .add(function::Column::Name.eq("test_function")), + ) + .one(&mgr.inner.read().await.db) + .await? + .unwrap(); + assert_eq!(function.return_type.0, return_type); + assert_eq!(function.language, "python"); + + mgr.drop_function(function.function_id).await?; + assert!(Object::find_by_id(function.function_id) + .one(&mgr.inner.read().await.db) + .await? + .is_none()); + + Ok(()) + } +} diff --git a/src/meta/src/controller/cluster.rs b/src/meta/src/controller/cluster.rs new file mode 100644 index 0000000000000..ca29380a49fca --- /dev/null +++ b/src/meta/src/controller/cluster.rs @@ -0,0 +1,988 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::cmp; +use std::cmp::Ordering; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::ops::Add; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; + +use itertools::Itertools; +use risingwave_common::hash::ParallelUnitId; +use risingwave_hummock_sdk::HummockSstableObjectId; +use risingwave_pb::common::worker_node::{PbProperty, PbState}; +use risingwave_pb::common::{ + HostAddress, ParallelUnit, PbHostAddress, PbParallelUnit, PbWorkerNode, PbWorkerType, +}; +use risingwave_pb::meta::add_worker_node_request::Property as AddNodeProperty; +use risingwave_pb::meta::heartbeat_request; +use risingwave_pb::meta::subscribe_response::{Info, Operation}; +use risingwave_pb::meta::update_worker_node_schedulability_request::Schedulability; +use sea_orm::prelude::Expr; +use sea_orm::{ + ActiveModelTrait, ActiveValue, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, + QuerySelect, TransactionTrait, +}; +use tokio::sync::oneshot::Sender; +use tokio::sync::{RwLock, RwLockReadGuard}; +use tokio::task::JoinHandle; + +use crate::manager::prelude::{Worker, WorkerProperty}; +use crate::manager::{LocalNotification, MetaSrvEnv, WorkerKey}; +use crate::model_v2::worker::{WorkerStatus, WorkerType}; +use crate::model_v2::{worker, worker_property, I32Array, TransactionId, WorkerId}; +use crate::{MetaError, MetaResult}; + +pub type ClusterControllerRef = Arc; + +pub struct ClusterController { + env: MetaSrvEnv, + max_heartbeat_interval: Duration, + inner: RwLock, +} + +struct WorkerInfo(worker::Model, Option); + +impl From for PbWorkerNode { + fn from(info: WorkerInfo) -> Self { + Self { + id: info.0.worker_id, + r#type: PbWorkerType::from(info.0.worker_type) as _, + host: Some(PbHostAddress { + host: info.0.host, + port: info.0.port, + }), + state: PbState::from(info.0.status) as _, + parallel_units: info + .1 + .as_ref() + .map(|p| { + p.parallel_unit_ids + .0 + .iter() + .map(|&id| PbParallelUnit { + id: id as _, + worker_node_id: info.0.worker_id, + }) 
+ .collect_vec() + }) + .unwrap_or_default(), + property: info.1.as_ref().map(|p| PbProperty { + is_streaming: p.is_streaming, + is_serving: p.is_serving, + is_unschedulable: p.is_unschedulable, + }), + transactional_id: info.0.transaction_id, + } + } +} + +impl From for WorkerType { + fn from(worker_type: PbWorkerType) -> Self { + match worker_type { + PbWorkerType::Unspecified => unreachable!("unspecified worker type"), + PbWorkerType::Frontend => Self::Frontend, + PbWorkerType::ComputeNode => Self::ComputeNode, + PbWorkerType::RiseCtl => Self::RiseCtl, + PbWorkerType::Compactor => Self::Compactor, + PbWorkerType::Meta => Self::Meta, + } + } +} + +impl From for PbWorkerType { + fn from(worker_type: WorkerType) -> Self { + match worker_type { + WorkerType::Frontend => Self::Frontend, + WorkerType::ComputeNode => Self::ComputeNode, + WorkerType::RiseCtl => Self::RiseCtl, + WorkerType::Compactor => Self::Compactor, + WorkerType::Meta => Self::Meta, + } + } +} + +impl From for WorkerStatus { + fn from(state: PbState) -> Self { + match state { + PbState::Unspecified => unreachable!("unspecified worker status"), + PbState::Starting => Self::Starting, + PbState::Running => Self::Running, + } + } +} + +impl From for PbState { + fn from(status: WorkerStatus) -> Self { + match status { + WorkerStatus::Starting => Self::Starting, + WorkerStatus::Running => Self::Running, + } + } +} + +impl From<&PbWorkerNode> for worker::ActiveModel { + fn from(worker: &PbWorkerNode) -> Self { + let host = worker.host.clone().unwrap(); + Self { + worker_id: ActiveValue::Set(worker.id), + worker_type: ActiveValue::Set(worker.r#type().into()), + host: ActiveValue::Set(host.host), + port: ActiveValue::Set(host.port), + status: ActiveValue::Set(worker.state().into()), + ..Default::default() + } + } +} + +impl ClusterController { + pub async fn new(env: MetaSrvEnv, max_heartbeat_interval: Duration) -> MetaResult { + let meta_store = env + .sql_meta_store() + .expect("sql meta store is not 
initialized"); + let inner = ClusterControllerInner::new(meta_store.conn).await?; + Ok(Self { + env, + max_heartbeat_interval, + inner: RwLock::new(inner), + }) + } + + /// Used in `NotificationService::subscribe`. + /// Need to pay attention to the order of acquiring locks to prevent deadlock problems. + pub async fn get_inner_guard(&self) -> RwLockReadGuard<'_, ClusterControllerInner> { + self.inner.read().await + } + + pub async fn count_worker_by_type(&self) -> MetaResult> { + self.inner.read().await.count_worker_by_type().await + } + + /// A worker node will immediately register itself to meta when it bootstraps. + /// The meta will assign it with a unique ID and set its state as `Starting`. + /// When the worker node is fully ready to serve, it will request meta again + /// (via `activate_worker_node`) to set its state to `Running`. + pub async fn add_worker( + &self, + r#type: PbWorkerType, + host_address: HostAddress, + property: AddNodeProperty, + ) -> MetaResult { + self.inner + .write() + .await + .add_worker(r#type, host_address, property, self.max_heartbeat_interval) + .await + } + + pub async fn activate_worker(&self, worker_id: WorkerId) -> MetaResult<()> { + let inner = self.inner.write().await; + let worker = inner.activate_worker(worker_id).await?; + + // Notify frontends of new compute node. + // Always notify because a running worker's property may have been changed. 
+ if worker.r#type() == PbWorkerType::ComputeNode { + self.env + .notification_manager() + .notify_frontend(Operation::Add, Info::Node(worker.clone())) + .await; + } + self.env + .notification_manager() + .notify_local_subscribers(LocalNotification::WorkerNodeActivated(worker)) + .await; + + Ok(()) + } + + pub async fn delete_worker(&self, host_address: HostAddress) -> MetaResult { + let mut inner = self.inner.write().await; + let worker = inner.delete_worker(host_address).await?; + if worker.r#type() == PbWorkerType::ComputeNode { + self.env + .notification_manager() + .notify_frontend(Operation::Delete, Info::Node(worker.clone())) + .await; + } + + // Notify local subscribers. + // Note: Any type of workers may pin some hummock resource. So `HummockManager` expect this + // local notification. + self.env + .notification_manager() + .notify_local_subscribers(LocalNotification::WorkerNodeDeleted(worker.clone())) + .await; + + Ok(worker) + } + + pub async fn update_schedulability( + &self, + worker_ids: Vec, + schedulability: Schedulability, + ) -> MetaResult<()> { + self.inner + .write() + .await + .update_schedulability(worker_ids, schedulability) + .await + } + + /// Invoked when it receives a heartbeat from a worker node. + pub async fn heartbeat( + &self, + worker_id: WorkerId, + info: Vec, + ) { + tracing::trace!(target: "events::meta::server_heartbeat", worker_id = worker_id, "receive heartbeat"); + self.inner + .write() + .await + .heartbeat(worker_id, self.max_heartbeat_interval, info) + } + + pub fn start_heartbeat_checker( + cluster_controller: ClusterController, + check_interval: Duration, + ) -> (JoinHandle<()>, Sender<()>) { + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); + let join_handle = tokio::spawn(async move { + let mut min_interval = tokio::time::interval(check_interval); + min_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + loop { + tokio::select! 
{ + // Wait for interval + _ = min_interval.tick() => {}, + // Shutdown + _ = &mut shutdown_rx => { + tracing::info!("Heartbeat checker is stopped"); + return; + } + } + + let mut inner = cluster_controller.inner.write().await; + // 1. Initialize new workers' TTL. + for worker in inner + .worker_extra_info + .values_mut() + .filter(|worker| worker.expire_at.is_none()) + { + worker.update_ttl(cluster_controller.max_heartbeat_interval); + } + + // 2. Collect expired workers. + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Clock may have gone backwards") + .as_secs(); + let worker_to_delete = inner + .worker_extra_info + .iter() + .filter(|(_, info)| info.expire_at.unwrap() < now) + .map(|(id, _)| *id) + .collect_vec(); + + // 3. Delete expired workers. + let worker_infos = match Worker::find() + .select_only() + .column(worker::Column::WorkerType) + .column(worker::Column::Host) + .column(worker::Column::Port) + .into_tuple::<(WorkerType, String, i32)>() + .all(&inner.db) + .await + { + Ok(keys) => keys, + Err(err) => { + tracing::warn!("Failed to load expire worker info from db: {}", err); + continue; + } + }; + + if let Err(err) = Worker::delete_many() + .filter(worker::Column::WorkerId.is_in(worker_to_delete)) + .exec(&inner.db) + .await + { + tracing::warn!("Failed to delete expire workers from db: {}", err); + continue; + } + + for (worker_type, host, port) in worker_infos { + match worker_type { + WorkerType::Frontend + | WorkerType::ComputeNode + | WorkerType::Compactor + | WorkerType::RiseCtl => { + cluster_controller + .env + .notification_manager() + .delete_sender( + worker_type.into(), + WorkerKey(HostAddress { host, port }), + ) + .await + } + _ => {} + }; + } + } + }); + + (join_handle, shutdown_tx) + } + + /// Get live nodes with the specified type and state. + /// # Arguments + /// * `worker_type` `WorkerType` of the nodes + /// * `worker_state` Filter by this state if it is not None. 
+ pub async fn list_workers( + &self, + worker_type: WorkerType, + worker_status: Option, + ) -> MetaResult> { + self.inner + .read() + .await + .list_workers(worker_type, worker_status) + .await + } + + /// A convenient method to get all running compute nodes that may have running actors on them + /// i.e. CNs which are running + pub async fn list_active_streaming_workers(&self) -> MetaResult> { + self.inner + .read() + .await + .list_active_streaming_workers() + .await + } + + pub async fn list_active_parallel_units(&self) -> MetaResult> { + self.inner.read().await.list_active_parallel_units().await + } + + /// Get the cluster info used for scheduling a streaming job, containing all nodes that are + /// running and schedulable + pub async fn list_active_serving_workers(&self) -> MetaResult> { + self.inner.read().await.list_active_serving_workers().await + } + + /// Get the cluster info used for scheduling a streaming job. + pub async fn get_streaming_cluster_info(&self) -> MetaResult { + self.inner.read().await.get_streaming_cluster_info().await + } + + pub async fn get_worker_by_id(&self, worker_id: WorkerId) -> MetaResult> { + self.inner.read().await.get_worker_by_id(worker_id).await + } + + pub async fn get_worker_info_by_id(&self, worker_id: WorkerId) -> Option { + self.inner + .read() + .await + .get_worker_extra_info_by_id(worker_id) + } +} + +#[derive(Default, Clone)] +pub struct WorkerExtraInfo { + // Volatile values updated by meta node as follows. + // + // Unix timestamp that the worker will expire at. + expire_at: Option, + // Monotonic increasing id since meta node bootstrap. + info_version_id: u64, + // GC watermark. 
+ hummock_gc_watermark: Option, +} + +impl WorkerExtraInfo { + fn update_ttl(&mut self, ttl: Duration) { + let expire = cmp::max( + self.expire_at.unwrap_or_default(), + SystemTime::now() + .add(ttl) + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Clock may have gone backwards") + .as_secs(), + ); + self.expire_at = Some(expire); + } + + fn update_hummock_info(&mut self, info: Vec) { + self.info_version_id += 1; + for i in info { + match i { + heartbeat_request::extra_info::Info::HummockGcWatermark(watermark) => { + self.hummock_gc_watermark = Some(watermark); + } + } + } + } +} + +/// The cluster info used for scheduling a streaming job. +#[derive(Debug, Clone)] +pub struct StreamingClusterInfo { + /// All **active** compute nodes in the cluster. + pub worker_nodes: HashMap, + + /// All parallel units of the **active** compute nodes in the cluster. + pub parallel_units: HashMap, + + /// All unschedulable parallel units of compute nodes in the cluster. + pub unschedulable_parallel_units: HashMap, +} + +pub struct ClusterControllerInner { + db: DatabaseConnection, + /// Record for tracking available machine ids, one is available. 
+ available_transactional_ids: VecDeque, + worker_extra_info: HashMap, +} + +impl ClusterControllerInner { + pub const MAX_WORKER_REUSABLE_ID_BITS: usize = 10; + pub const MAX_WORKER_REUSABLE_ID_COUNT: usize = 1 << Self::MAX_WORKER_REUSABLE_ID_BITS; + + pub async fn new(db: DatabaseConnection) -> MetaResult { + let workers: Vec<(WorkerId, Option)> = Worker::find() + .select_only() + .column(worker::Column::WorkerId) + .column(worker::Column::TransactionId) + .into_tuple() + .all(&db) + .await?; + let inuse_txn_ids: HashSet<_> = workers + .iter() + .cloned() + .filter_map(|(_, txn_id)| txn_id) + .collect(); + let available_transactional_ids = (0..Self::MAX_WORKER_REUSABLE_ID_COUNT as TransactionId) + .filter(|id| !inuse_txn_ids.contains(id)) + .collect(); + + let worker_extra_info = workers + .into_iter() + .map(|(w, _)| (w, WorkerExtraInfo::default())) + .collect(); + + Ok(Self { + db, + available_transactional_ids, + worker_extra_info, + }) + } + + pub async fn count_worker_by_type(&self) -> MetaResult> { + let workers: Vec<(WorkerType, i32)> = Worker::find() + .select_only() + .column(worker::Column::WorkerType) + .column_as(worker::Column::WorkerId.count(), "count") + .group_by(worker::Column::WorkerType) + .into_tuple() + .all(&self.db) + .await?; + + Ok(workers.into_iter().collect()) + } + + pub fn update_worker_ttl(&mut self, worker_id: WorkerId, ttl: Duration) -> MetaResult<()> { + if let Some(info) = self.worker_extra_info.get_mut(&worker_id) { + let expire = cmp::max( + info.expire_at.unwrap_or_default(), + SystemTime::now() + .add(ttl) + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Clock may have gone backwards") + .as_secs(), + ); + info.expire_at = Some(expire); + Ok(()) + } else { + Err(MetaError::invalid_worker( + worker_id, + "worker not found".into(), + )) + } + } + + fn apply_transaction_id(&self, r#type: PbWorkerType) -> MetaResult> { + match (self.available_transactional_ids.front(), r#type) { + (None, _) => Err(MetaError::unavailable( + 
"no available reusable machine id".to_string(), + )), + // We only assign transactional id to compute node and frontend. + (Some(id), PbWorkerType::ComputeNode | PbWorkerType::Frontend) => Ok(Some(*id)), + _ => Ok(None), + } + } + + pub async fn add_worker( + &mut self, + r#type: PbWorkerType, + host_address: HostAddress, + add_property: AddNodeProperty, + ttl: Duration, + ) -> MetaResult { + let txn = self.db.begin().await?; + + // TODO: remove this workaround when we deprecate parallel unit ids. + let derive_parallel_units = |txn_id: TransactionId, start: u32, end: u32| { + (start..end) + .map(|idx| ((idx << Self::MAX_WORKER_REUSABLE_ID_BITS) + txn_id) as i32) + .collect_vec() + }; + + let worker = Worker::find() + .filter( + worker::Column::Host + .eq(host_address.host.clone()) + .and(worker::Column::Port.eq(host_address.port)), + ) + .find_also_related(WorkerProperty) + .one(&txn) + .await?; + // Worker already exist. + if let Some((worker, property)) = worker { + assert_eq!(worker.worker_type, r#type.into()); + return if worker.worker_type == WorkerType::ComputeNode { + let property = property.unwrap(); + let txn_id = worker.transaction_id.unwrap(); + let mut current_parallelism = property.parallel_unit_ids.0.clone(); + let new_parallelism = add_property.worker_node_parallelism as usize; + + match new_parallelism.cmp(¤t_parallelism.len()) { + Ordering::Less => { + // Warn and keep the original parallelism if the worker registered with a + // smaller parallelism. 
+ tracing::warn!( + "worker {} parallelism is less than current, current is {}, but received {}", + worker.worker_id, + current_parallelism.len(), + new_parallelism + ); + } + Ordering::Greater => { + tracing::info!( + "worker {} parallelism updated from {} to {}", + worker.worker_id, + current_parallelism.len(), + new_parallelism + ); + current_parallelism.extend(derive_parallel_units( + txn_id, + current_parallelism.len() as _, + new_parallelism as _, + )); + } + Ordering::Equal => {} + } + let mut property: worker_property::ActiveModel = property.into(); + + // keep `is_unschedulable` unchanged. + property.is_streaming = ActiveValue::Set(add_property.is_streaming); + property.is_serving = ActiveValue::Set(add_property.is_serving); + property.parallel_unit_ids = ActiveValue::Set(I32Array(current_parallelism)); + + WorkerProperty::update(property).exec(&txn).await?; + txn.commit().await?; + self.update_worker_ttl(worker.worker_id, ttl)?; + Ok(worker.worker_id) + } else { + self.update_worker_ttl(worker.worker_id, ttl)?; + Ok(worker.worker_id) + }; + } + let txn_id = self.apply_transaction_id(r#type)?; + + let worker = worker::ActiveModel { + worker_id: Default::default(), + worker_type: ActiveValue::Set(r#type.into()), + host: ActiveValue::Set(host_address.host), + port: ActiveValue::Set(host_address.port), + status: ActiveValue::Set(WorkerStatus::Starting), + transaction_id: ActiveValue::Set(txn_id), + }; + let insert_res = Worker::insert(worker).exec(&txn).await?; + let worker_id = insert_res.last_insert_id as WorkerId; + if r#type == PbWorkerType::ComputeNode { + let property = worker_property::ActiveModel { + worker_id: ActiveValue::Set(worker_id), + parallel_unit_ids: ActiveValue::Set(I32Array(derive_parallel_units( + *txn_id.as_ref().unwrap(), + 0, + add_property.worker_node_parallelism as _, + ))), + is_streaming: ActiveValue::Set(add_property.is_streaming), + is_serving: ActiveValue::Set(add_property.is_streaming), + is_unschedulable: 
ActiveValue::Set(add_property.is_streaming), + }; + WorkerProperty::insert(property).exec(&txn).await?; + } + + txn.commit().await?; + if let Some(txn_id) = txn_id { + self.available_transactional_ids.retain(|id| *id != txn_id); + } + self.worker_extra_info + .insert(worker_id, WorkerExtraInfo::default()); + + Ok(worker_id) + } + + pub async fn activate_worker(&self, worker_id: WorkerId) -> MetaResult { + let worker = worker::ActiveModel { + worker_id: ActiveValue::Set(worker_id), + status: ActiveValue::Set(WorkerStatus::Running), + ..Default::default() + }; + + let worker = worker.update(&self.db).await?; + let worker_property = WorkerProperty::find_by_id(worker.worker_id) + .one(&self.db) + .await?; + Ok(WorkerInfo(worker, worker_property).into()) + } + + pub async fn update_schedulability( + &self, + worker_ids: Vec, + schedulability: Schedulability, + ) -> MetaResult<()> { + let is_unschedulable = schedulability == Schedulability::Unschedulable; + WorkerProperty::update_many() + .col_expr( + worker_property::Column::IsUnschedulable, + Expr::value(is_unschedulable), + ) + .filter(worker_property::Column::WorkerId.is_in(worker_ids)) + .exec(&self.db) + .await?; + + Ok(()) + } + + pub async fn delete_worker(&mut self, host_addr: HostAddress) -> MetaResult { + let worker = Worker::find() + .filter( + worker::Column::Host + .eq(host_addr.host) + .and(worker::Column::Port.eq(host_addr.port)), + ) + .find_also_related(WorkerProperty) + .one(&self.db) + .await?; + let Some((worker, property)) = worker else { + return Err(MetaError::invalid_parameter("worker not found!")); + }; + + let res = Worker::delete_by_id(worker.worker_id) + .exec(&self.db) + .await?; + if res.rows_affected == 0 { + return Err(MetaError::invalid_parameter("worker not found!")); + } + + self.worker_extra_info.remove(&worker.worker_id); + if let Some(txn_id) = &worker.transaction_id { + self.available_transactional_ids.push_back(*txn_id); + } + Ok(WorkerInfo(worker, property).into()) + } + + pub fn 
heartbeat( + &mut self, + worker_id: WorkerId, + ttl: Duration, + info: Vec, + ) { + if let Some(worker_info) = self.worker_extra_info.get_mut(&worker_id) { + worker_info.update_ttl(ttl); + worker_info.update_hummock_info(info); + } + } + + pub async fn list_workers( + &self, + worker_type: WorkerType, + worker_status: Option, + ) -> MetaResult> { + let workers = if let Some(status) = worker_status { + Worker::find() + .filter( + worker::Column::WorkerType + .eq(worker_type) + .and(worker::Column::Status.eq(status)), + ) + .find_also_related(WorkerProperty) + .all(&self.db) + .await? + } else { + Worker::find() + .filter(worker::Column::WorkerType.eq(worker_type)) + .find_also_related(WorkerProperty) + .all(&self.db) + .await? + }; + + Ok(workers + .into_iter() + .map(|(worker, property)| WorkerInfo(worker, property).into()) + .collect_vec()) + } + + pub async fn list_active_streaming_workers(&self) -> MetaResult> { + let workers = Worker::find() + .filter( + worker::Column::WorkerType + .eq(WorkerType::ComputeNode) + .and(worker::Column::Status.eq(WorkerStatus::Running)), + ) + .inner_join(WorkerProperty) + .select_also(WorkerProperty) + .filter(worker_property::Column::IsStreaming.eq(true)) + .all(&self.db) + .await?; + + Ok(workers + .into_iter() + .map(|(worker, property)| WorkerInfo(worker, property).into()) + .collect_vec()) + } + + pub async fn list_active_parallel_units(&self) -> MetaResult> { + let parallel_units: Vec<(WorkerId, I32Array)> = WorkerProperty::find() + .select_only() + .column(worker_property::Column::WorkerId) + .column(worker_property::Column::ParallelUnitIds) + .inner_join(Worker) + .filter(worker::Column::Status.eq(WorkerStatus::Running)) + .into_tuple() + .all(&self.db) + .await?; + Ok(parallel_units + .into_iter() + .flat_map(|(id, pu)| { + pu.0.into_iter().map(move |parallel_unit_id| ParallelUnit { + id: parallel_unit_id as _, + worker_node_id: id, + }) + }) + .collect_vec()) + } + + pub async fn list_active_serving_workers(&self) -> 
MetaResult> { + let workers = Worker::find() + .filter( + worker::Column::WorkerType + .eq(WorkerType::ComputeNode) + .and(worker::Column::Status.eq(WorkerStatus::Running)), + ) + .inner_join(WorkerProperty) + .select_also(WorkerProperty) + .filter(worker_property::Column::IsServing.eq(true)) + .all(&self.db) + .await?; + + Ok(workers + .into_iter() + .map(|(worker, property)| WorkerInfo(worker, property).into()) + .collect_vec()) + } + + pub async fn get_streaming_cluster_info(&self) -> MetaResult { + let mut streaming_workers = self.list_active_streaming_workers().await?; + + let unschedulable_worker_node = streaming_workers + .extract_if(|worker| { + worker + .property + .as_ref() + .map_or(false, |p| p.is_unschedulable) + }) + .collect_vec(); + + let active_workers: HashMap<_, _> = + streaming_workers.into_iter().map(|w| (w.id, w)).collect(); + + let active_parallel_units = active_workers + .values() + .flat_map(|worker| worker.parallel_units.iter().map(|p| (p.id, p.clone()))) + .collect(); + + let unschedulable_parallel_units = unschedulable_worker_node + .iter() + .flat_map(|worker| worker.parallel_units.iter().map(|p| (p.id, p.clone()))) + .collect(); + + Ok(StreamingClusterInfo { + worker_nodes: active_workers, + parallel_units: active_parallel_units, + unschedulable_parallel_units, + }) + } + + pub async fn get_worker_by_id(&self, worker_id: WorkerId) -> MetaResult> { + let worker = Worker::find_by_id(worker_id) + .find_also_related(WorkerProperty) + .one(&self.db) + .await?; + Ok(worker.map(|(w, p)| WorkerInfo(w, p).into())) + } + + pub fn get_worker_extra_info_by_id(&self, worker_id: WorkerId) -> Option { + self.worker_extra_info.get(&worker_id).cloned() + } +} + +#[cfg(test)] +#[cfg(not(madsim))] +mod tests { + use super::*; + + fn mock_worker_hosts_for_test(count: usize) -> Vec { + (0..count) + .map(|i| HostAddress { + host: "localhost".to_string(), + port: 5000 + i as i32, + }) + .collect_vec() + } + + #[tokio::test] + async fn 
test_cluster_controller() -> MetaResult<()> { + let env = MetaSrvEnv::for_test().await; + let cluster_ctl = ClusterController::new(env, Duration::from_secs(1)).await?; + + let parallelism_num = 4_usize; + let worker_count = 5_usize; + let property = AddNodeProperty { + worker_node_parallelism: parallelism_num as _, + is_streaming: true, + is_serving: true, + is_unschedulable: false, + }; + let hosts = mock_worker_hosts_for_test(worker_count); + let mut worker_ids = vec![]; + for host in &hosts { + worker_ids.push( + cluster_ctl + .add_worker(PbWorkerType::ComputeNode, host.clone(), property.clone()) + .await?, + ); + } + + // Since no worker is active, the parallel unit count should be 0. + assert_eq!(cluster_ctl.list_active_parallel_units().await?.len(), 0); + + for id in &worker_ids { + cluster_ctl.activate_worker(*id).await?; + } + let worker_cnt_map = cluster_ctl.count_worker_by_type().await?; + assert_eq!( + *worker_cnt_map.get(&WorkerType::ComputeNode).unwrap() as usize, + worker_count + ); + assert_eq!( + cluster_ctl.list_active_streaming_workers().await?.len(), + worker_count + ); + assert_eq!( + cluster_ctl.list_active_serving_workers().await?.len(), + worker_count + ); + assert_eq!( + cluster_ctl.list_active_parallel_units().await?.len(), + parallelism_num * worker_count + ); + + // re-register existing worker node with larger parallelism and change its serving mode. 
+ let mut new_property = property.clone(); + new_property.worker_node_parallelism = (parallelism_num * 2) as _; + new_property.is_serving = false; + cluster_ctl + .add_worker(PbWorkerType::ComputeNode, hosts[0].clone(), new_property) + .await?; + + assert_eq!( + cluster_ctl.list_active_streaming_workers().await?.len(), + worker_count + ); + assert_eq!( + cluster_ctl.list_active_serving_workers().await?.len(), + worker_count - 1 + ); + let parallel_units = cluster_ctl.list_active_parallel_units().await?; + assert!(parallel_units.iter().map(|pu| pu.id).all_unique()); + assert_eq!(parallel_units.len(), parallelism_num * (worker_count + 1)); + + // delete workers. + for host in hosts { + cluster_ctl.delete_worker(host).await?; + } + assert_eq!(cluster_ctl.list_active_streaming_workers().await?.len(), 0); + assert_eq!(cluster_ctl.list_active_serving_workers().await?.len(), 0); + assert_eq!(cluster_ctl.list_active_parallel_units().await?.len(), 0); + + Ok(()) + } + + #[tokio::test] + async fn test_update_schedulability() -> MetaResult<()> { + let env = MetaSrvEnv::for_test().await; + let cluster_ctl = ClusterController::new(env, Duration::from_secs(1)).await?; + + let host = HostAddress { + host: "localhost".to_string(), + port: 5001, + }; + let mut property = AddNodeProperty { + worker_node_parallelism: 4, + is_streaming: true, + is_serving: true, + is_unschedulable: false, + }; + let worker_id = cluster_ctl + .add_worker(PbWorkerType::ComputeNode, host.clone(), property.clone()) + .await?; + + cluster_ctl.activate_worker(worker_id).await?; + cluster_ctl + .update_schedulability(vec![worker_id], Schedulability::Unschedulable) + .await?; + + let workers = cluster_ctl.list_active_streaming_workers().await?; + assert_eq!(workers.len(), 1); + assert!(workers[0].property.as_ref().unwrap().is_unschedulable); + + // re-register existing worker node and change its serving mode, the schedulable state should not be changed. 
+ property.is_unschedulable = false; + property.is_serving = false; + let new_worker_id = cluster_ctl + .add_worker(PbWorkerType::ComputeNode, host.clone(), property) + .await?; + assert_eq!(worker_id, new_worker_id); + + let workers = cluster_ctl.list_active_streaming_workers().await?; + assert_eq!(workers.len(), 1); + assert!(workers[0].property.as_ref().unwrap().is_unschedulable); + + cluster_ctl.delete_worker(host).await?; + + Ok(()) + } +} diff --git a/src/meta/src/controller/mod.rs b/src/meta/src/controller/mod.rs new file mode 100644 index 0000000000000..07793e30a17fe --- /dev/null +++ b/src/meta/src/controller/mod.rs @@ -0,0 +1,267 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::anyhow; +use risingwave_common::util::epoch::Epoch; +use risingwave_pb::catalog::connection::PbInfo as PbConnectionInfo; +use risingwave_pb::catalog::source::PbOptionalAssociatedTableId; +use risingwave_pb::catalog::table::{PbOptionalAssociatedSourceId, PbTableType}; +use risingwave_pb::catalog::{ + PbConnection, PbCreateType, PbDatabase, PbHandleConflictBehavior, PbIndex, PbSchema, PbSink, + PbSinkType, PbSource, PbStreamJobStatus, PbTable, PbView, +}; +use sea_orm::{ActiveValue, DatabaseConnection, ModelTrait}; + +use crate::model_v2::{connection, database, index, object, schema, sink, source, table, view}; +use crate::MetaError; + +#[allow(dead_code)] +pub mod catalog; +pub mod cluster; +pub mod rename; +pub mod system_param; +pub mod utils; + +// todo: refine the error transform. +impl From for MetaError { + fn from(err: sea_orm::DbErr) -> Self { + if let Some(err) = err.sql_err() { + return anyhow!(err).into(); + } + anyhow!(err).into() + } +} + +#[derive(Clone)] +pub struct SqlMetaStore { + pub conn: DatabaseConnection, +} + +impl SqlMetaStore { + pub fn new(conn: DatabaseConnection) -> Self { + Self { conn } + } + + #[cfg(any(test, feature = "test"))] + #[cfg(not(madsim))] + pub async fn for_test() -> Self { + use model_migration::{Migrator, MigratorTrait}; + let conn = sea_orm::Database::connect("sqlite::memory:").await.unwrap(); + Migrator::up(&conn, None).await.unwrap(); + Self { conn } + } +} + +pub struct ObjectModel(M, object::Model); + +impl From> for PbDatabase { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.database_id, + name: value.0.name, + owner: value.1.owner_id, + } + } +} + +impl From for database::ActiveModel { + fn from(db: PbDatabase) -> Self { + Self { + database_id: ActiveValue::Set(db.id), + name: ActiveValue::Set(db.name), + } + } +} + +impl From for schema::ActiveModel { + fn from(schema: PbSchema) -> Self { + Self { + schema_id: ActiveValue::Set(schema.id), + name: ActiveValue::Set(schema.name), + } + 
} +} + +impl From> for PbSchema { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.schema_id, + name: value.0.name, + database_id: value.1.database_id.unwrap(), + owner: value.1.owner_id, + } + } +} + +impl From> for PbTable { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.table_id, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + columns: value.0.columns.0, + pk: value.0.pk.0, + dependent_relations: vec![], // todo: deprecate it. + table_type: PbTableType::from(value.0.table_type) as _, + distribution_key: value.0.distribution_key.0, + stream_key: value.0.stream_key.0, + append_only: value.0.append_only, + owner: value.1.owner_id, + properties: value.0.properties.0, + fragment_id: value.0.fragment_id as u32, + vnode_col_index: value.0.vnode_col_index, + row_id_index: value.0.row_id_index, + value_indices: value.0.value_indices.0, + definition: value.0.definition, + handle_pk_conflict_behavior: PbHandleConflictBehavior::from( + value.0.handle_pk_conflict_behavior, + ) as _, + read_prefix_len_hint: value.0.read_prefix_len_hint, + watermark_indices: value.0.watermark_indices.0, + dist_key_in_pk: value.0.dist_key_in_pk.0, + dml_fragment_id: value.0.dml_fragment_id.map(|id| id as u32), + cardinality: value.0.cardinality.map(|cardinality| cardinality.0), + initialized_at_epoch: Some( + Epoch::from_unix_millis(value.1.initialized_at.timestamp_millis() as _).0, + ), + created_at_epoch: Some( + Epoch::from_unix_millis(value.1.created_at.timestamp_millis() as _).0, + ), + cleaned_by_watermark: value.0.cleaned_by_watermark, + stream_job_status: PbStreamJobStatus::from(value.0.job_status) as _, + create_type: PbCreateType::from(value.0.create_type) as _, + version: Some(value.0.version.0), + optional_associated_source_id: value + .0 + .optional_associated_source_id + .map(PbOptionalAssociatedSourceId::AssociatedSourceId), + } + } +} + +impl From> for PbSource { + fn from(value: 
ObjectModel) -> Self { + Self { + id: value.0.source_id, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + row_id_index: value.0.row_id_index, + columns: value.0.columns.0, + pk_column_ids: value.0.pk_column_ids.0, + properties: value.0.properties.0, + owner: value.1.owner_id, + info: value.0.source_info.map(|info| info.0), + watermark_descs: value.0.watermark_descs.0, + definition: value.0.definition, + connection_id: value.0.connection_id, + // todo: using the timestamp from the database directly. + initialized_at_epoch: Some( + Epoch::from_unix_millis(value.1.initialized_at.timestamp_millis() as _).0, + ), + created_at_epoch: Some( + Epoch::from_unix_millis(value.1.created_at.timestamp_millis() as _).0, + ), + version: value.0.version, + optional_associated_table_id: value + .0 + .optional_associated_table_id + .map(PbOptionalAssociatedTableId::AssociatedTableId), + } + } +} + +impl From> for PbSink { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.sink_id, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + columns: value.0.columns.0, + plan_pk: value.0.plan_pk.0, + dependent_relations: vec![], // todo: deprecate it. 
+ distribution_key: value.0.distribution_key.0, + downstream_pk: value.0.downstream_pk.0, + sink_type: PbSinkType::from(value.0.sink_type) as _, + owner: value.1.owner_id, + properties: value.0.properties.0, + definition: value.0.definition, + connection_id: value.0.connection_id, + initialized_at_epoch: Some( + Epoch::from_unix_millis(value.1.initialized_at.timestamp_millis() as _).0, + ), + created_at_epoch: Some( + Epoch::from_unix_millis(value.1.created_at.timestamp_millis() as _).0, + ), + db_name: value.0.db_name, + sink_from_name: value.0.sink_from_name, + stream_job_status: PbStreamJobStatus::from(value.0.job_status) as _, + format_desc: value.0.sink_format_desc.map(|desc| desc.0), + } + } +} + +impl From> for PbIndex { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.index_id, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + owner: value.1.owner_id, + index_table_id: value.0.index_table_id, + primary_table_id: value.0.primary_table_id, + index_item: value.0.index_items.0, + original_columns: value.0.original_columns.0, + initialized_at_epoch: Some( + Epoch::from_unix_millis(value.1.initialized_at.timestamp_millis() as _).0, + ), + created_at_epoch: Some( + Epoch::from_unix_millis(value.1.created_at.timestamp_millis() as _).0, + ), + stream_job_status: PbStreamJobStatus::from(value.0.job_status) as _, + } + } +} + +impl From> for PbView { + fn from(value: ObjectModel) -> Self { + Self { + id: value.0.view_id, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + owner: value.1.owner_id, + properties: value.0.properties.0, + sql: value.0.definition, + dependent_relations: vec![], // todo: deprecate it. 
+ columns: value.0.columns.0, + } + } +} + +impl From> for PbConnection { + fn from(value: ObjectModel) -> Self { + Self { + id: value.1.oid, + schema_id: value.1.schema_id.unwrap(), + database_id: value.1.database_id.unwrap(), + name: value.0.name, + owner: value.1.owner_id, + info: Some(PbConnectionInfo::PrivateLinkService(value.0.info.0)), + } + } +} diff --git a/src/meta/src/controller/rename.rs b/src/meta/src/controller/rename.rs new file mode 100644 index 0000000000000..254565efb391c --- /dev/null +++ b/src/meta/src/controller/rename.rs @@ -0,0 +1,430 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use itertools::Itertools; +use risingwave_common::util::column_index_mapping::ColIndexMapping; +use risingwave_pb::expr::expr_node::RexNode; +use risingwave_pb::expr::{ExprNode, FunctionCall, UserDefinedFunction}; +use risingwave_sqlparser::ast::{ + Array, CreateSink, CreateSinkStatement, CreateSourceStatement, Distinct, Expr, Function, + FunctionArg, FunctionArgExpr, Ident, ObjectName, Query, SelectItem, SetExpr, Statement, + TableAlias, TableFactor, TableWithJoins, +}; +use risingwave_sqlparser::parser::Parser; + +/// `alter_relation_rename` renames a relation to a new name in its `Create` statement, and returns +/// the updated definition raw sql. Note that the `definition` must be a `Create` statement and the +/// `new_name` must be a valid identifier, it should be validated before calling this function. 
To +/// update all relations that depend on the renamed one, use `alter_relation_rename_refs`. +pub fn alter_relation_rename(definition: &str, new_name: &str) -> String { + // This happens when we try to rename a table that's created by `CREATE TABLE AS`. Remove it + // when we support `SHOW CREATE TABLE` for `CREATE TABLE AS`. + if definition.is_empty() { + tracing::warn!("found empty definition when renaming relation, ignored."); + return definition.into(); + } + let ast = Parser::parse_sql(definition).expect("failed to parse relation definition"); + let mut stmt = ast + .into_iter() + .exactly_one() + .expect("should contains only one statement"); + + match &mut stmt { + Statement::CreateTable { name, .. } + | Statement::CreateView { name, .. } + | Statement::CreateIndex { name, .. } + | Statement::CreateSource { + stmt: CreateSourceStatement { + source_name: name, .. + }, + } + | Statement::CreateSink { + stmt: CreateSinkStatement { + sink_name: name, .. + }, + } => replace_table_name(name, new_name), + _ => unreachable!(), + }; + + stmt.to_string() +} + +/// `alter_relation_rename_refs` updates all references of renamed-relation in the definition of +/// target relation's `Create` statement. +pub fn alter_relation_rename_refs(definition: &str, from: &str, to: &str) -> String { + let ast = Parser::parse_sql(definition).expect("failed to parse relation definition"); + let mut stmt = ast + .into_iter() + .exactly_one() + .expect("should contains only one statement"); + + match &mut stmt { + Statement::CreateTable { + query: Some(query), .. + } + | Statement::CreateView { query, .. } + | Statement::Query(query) // Used by view, actually we store a query as the definition of view. + | Statement::CreateSink { + stmt: + CreateSinkStatement { + sink_from: CreateSink::AsQuery(query), + .. + }, + } => { + QueryRewriter::rewrite_query(query, from, to); + } + Statement::CreateIndex { table_name, .. 
} + | Statement::CreateSink { + stmt: + CreateSinkStatement { + sink_from: CreateSink::From(table_name), + .. + }, + } => replace_table_name(table_name, to), + _ => unreachable!(), + }; + stmt.to_string() +} + +/// Replace the last ident in the `table_name` with the given name, the object name is ensured to be +/// non-empty. e.g. `schema.table` or `database.schema.table`. +fn replace_table_name(table_name: &mut ObjectName, to: &str) { + let idx = table_name.0.len() - 1; + table_name.0[idx] = Ident::new_unchecked(to); +} + +/// `QueryRewriter` is a visitor that updates all references of relation named `from` to `to` in the +/// given query, which is the part of create statement of `relation`. +struct QueryRewriter<'a> { + from: &'a str, + to: &'a str, +} + +impl QueryRewriter<'_> { + fn rewrite_query(query: &mut Query, from: &str, to: &str) { + let rewriter = QueryRewriter { from, to }; + rewriter.visit_query(query) + } + + /// Visit the query and update all references of relation named `from` to `to`. + fn visit_query(&self, query: &mut Query) { + if let Some(with) = &mut query.with { + for cte_table in &mut with.cte_tables { + self.visit_query(&mut cte_table.query); + } + } + self.visit_set_expr(&mut query.body); + for expr in &mut query.order_by { + self.visit_expr(&mut expr.expr); + } + } + + /// Visit table factor and update all references of relation named `from` to `to`. + /// Rewrite idents(i.e. `schema.table`, `table`) that contains the old name in the + /// following pattern: + /// 1. `FROM a` to `FROM new_a AS a` + /// 2. `FROM a AS b` to `FROM new_a AS b` + /// + /// So that we DON'T have to: + /// 1. rewrite the select and expr part like `schema.table.column`, `table.column`, + /// `alias.column` etc. + /// 2. handle the case that the old name is used as alias. + /// 3. handle the case that the new name is used as alias. + fn visit_table_factor(&self, table_factor: &mut TableFactor) { + match table_factor { + TableFactor::Table { name, alias, .. 
} => { + let idx = name.0.len() - 1; + if name.0[idx].real_value() == self.from { + if alias.is_none() { + *alias = Some(TableAlias { + name: Ident::new_unchecked(self.from), + columns: vec![], + }); + } + name.0[idx] = Ident::new_unchecked(self.to); + } + } + TableFactor::Derived { subquery, .. } => self.visit_query(subquery), + TableFactor::TableFunction { args, .. } => { + for arg in args { + self.visit_function_args(arg); + } + } + TableFactor::NestedJoin(table_with_joins) => { + self.visit_table_with_joins(table_with_joins); + } + } + } + + /// Visit table with joins and update all references of relation named `from` to `to`. + fn visit_table_with_joins(&self, table_with_joins: &mut TableWithJoins) { + self.visit_table_factor(&mut table_with_joins.relation); + for join in &mut table_with_joins.joins { + self.visit_table_factor(&mut join.relation); + } + } + + /// Visit query body expression and update all references. + fn visit_set_expr(&self, set_expr: &mut SetExpr) { + match set_expr { + SetExpr::Select(select) => { + if let Distinct::DistinctOn(exprs) = &mut select.distinct { + for expr in exprs { + self.visit_expr(expr); + } + } + for select_item in &mut select.projection { + self.visit_select_item(select_item); + } + for from_item in &mut select.from { + self.visit_table_with_joins(from_item); + } + if let Some(where_clause) = &mut select.selection { + self.visit_expr(where_clause); + } + for expr in &mut select.group_by { + self.visit_expr(expr); + } + if let Some(having) = &mut select.having { + self.visit_expr(having); + } + } + SetExpr::Query(query) => self.visit_query(query), + SetExpr::SetOperation { left, right, .. } => { + self.visit_set_expr(left); + self.visit_set_expr(right); + } + SetExpr::Values(_) => {} + } + } + + /// Visit function arguments and update all references. + fn visit_function_args(&self, function_args: &mut FunctionArg) { + match function_args { + FunctionArg::Unnamed(arg) | FunctionArg::Named { arg, .. 
} => match arg { + FunctionArgExpr::Expr(expr) | FunctionArgExpr::ExprQualifiedWildcard(expr, _) => { + self.visit_expr(expr) + } + FunctionArgExpr::QualifiedWildcard(_, None) | FunctionArgExpr::Wildcard(None) => {} + FunctionArgExpr::QualifiedWildcard(_, Some(exprs)) + | FunctionArgExpr::Wildcard(Some(exprs)) => { + for expr in exprs { + self.visit_expr(expr); + } + } + }, + } + } + + /// Visit function and update all references. + fn visit_function(&self, function: &mut Function) { + for arg in &mut function.args { + self.visit_function_args(arg); + } + } + + /// Visit expression and update all references. + fn visit_expr(&self, expr: &mut Expr) { + match expr { + Expr::FieldIdentifier(expr, ..) + | Expr::IsNull(expr) + | Expr::IsNotNull(expr) + | Expr::IsTrue(expr) + | Expr::IsNotTrue(expr) + | Expr::IsFalse(expr) + | Expr::IsNotFalse(expr) + | Expr::IsUnknown(expr) + | Expr::IsNotUnknown(expr) + | Expr::IsJson { expr, .. } + | Expr::InList { expr, .. } + | Expr::SomeOp(expr) + | Expr::AllOp(expr) + | Expr::UnaryOp { expr, .. } + | Expr::Cast { expr, .. } + | Expr::TryCast { expr, .. } + | Expr::AtTimeZone { + timestamp: expr, .. + } + | Expr::Extract { expr, .. } + | Expr::Substring { expr, .. } + | Expr::Overlay { expr, .. } + | Expr::Trim { expr, .. } + | Expr::Nested(expr) + | Expr::ArrayIndex { obj: expr, .. } + | Expr::ArrayRangeIndex { obj: expr, .. } => self.visit_expr(expr), + + Expr::Position { substring, string } => { + self.visit_expr(substring); + self.visit_expr(string); + } + + Expr::InSubquery { expr, subquery, .. } => { + self.visit_expr(expr); + self.visit_query(subquery); + } + Expr::Between { + expr, low, high, .. + } => { + self.visit_expr(expr); + self.visit_expr(low); + self.visit_expr(high); + } + + Expr::IsDistinctFrom(expr1, expr2) + | Expr::IsNotDistinctFrom(expr1, expr2) + | Expr::BinaryOp { + left: expr1, + right: expr2, + .. 
+ } => { + self.visit_expr(expr1); + self.visit_expr(expr2); + } + Expr::Function(function) => self.visit_function(function), + Expr::Exists(query) | Expr::Subquery(query) | Expr::ArraySubquery(query) => { + self.visit_query(query) + } + + Expr::GroupingSets(exprs_vec) | Expr::Cube(exprs_vec) | Expr::Rollup(exprs_vec) => { + for exprs in exprs_vec { + for expr in exprs { + self.visit_expr(expr); + } + } + } + + Expr::Row(exprs) | Expr::Array(Array { elem: exprs, .. }) => { + for expr in exprs { + self.visit_expr(expr); + } + } + + Expr::LambdaFunction { body, args: _ } => self.visit_expr(body), + + // No need to visit. + Expr::Identifier(_) + | Expr::CompoundIdentifier(_) + | Expr::Collate { .. } + | Expr::Value(_) + | Expr::Parameter { .. } + | Expr::TypedString { .. } + | Expr::Case { .. } => {} + } + } + + /// Visit select item and update all references. + fn visit_select_item(&self, select_item: &mut SelectItem) { + match select_item { + SelectItem::UnnamedExpr(expr) + | SelectItem::ExprQualifiedWildcard(expr, _) + | SelectItem::ExprWithAlias { expr, .. 
} => self.visit_expr(expr), + SelectItem::QualifiedWildcard(_, None) | SelectItem::Wildcard(None) => {} + SelectItem::QualifiedWildcard(_, Some(exprs)) | SelectItem::Wildcard(Some(exprs)) => { + for expr in exprs { + self.visit_expr(expr); + } + } + } + } +} + +pub struct ReplaceTableExprRewriter { + pub table_col_index_mapping: ColIndexMapping, +} + +impl ReplaceTableExprRewriter { + pub fn rewrite_expr(&self, expr: &mut ExprNode) { + let rex_node = expr.rex_node.as_mut().unwrap(); + match rex_node { + RexNode::InputRef(input_col_idx) => { + *input_col_idx = self.table_col_index_mapping.map(*input_col_idx as usize) as u32 + } + RexNode::Constant(_) => {} + RexNode::Udf(udf) => self.rewrite_udf(udf), + RexNode::FuncCall(function_call) => self.rewrite_function_call(function_call), + RexNode::Now(_) => {} + } + } + + fn rewrite_udf(&self, udf: &mut UserDefinedFunction) { + udf.children + .iter_mut() + .for_each(|expr| self.rewrite_expr(expr)); + } + + fn rewrite_function_call(&self, function_call: &mut FunctionCall) { + function_call + .children + .iter_mut() + .for_each(|expr| self.rewrite_expr(expr)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_alter_table_rename() { + let definition = "CREATE TABLE foo (a int, b int)"; + let new_name = "bar"; + let expected = "CREATE TABLE bar (a INT, b INT)"; + let actual = alter_relation_rename(definition, new_name); + assert_eq!(expected, actual); + } + + #[test] + fn test_rename_index_refs() { + let definition = "CREATE INDEX idx1 ON foo(v1 DESC, v2)"; + let from = "foo"; + let to = "bar"; + let expected = "CREATE INDEX idx1 ON bar(v1 DESC, v2)"; + let actual = alter_relation_rename_refs(definition, from, to); + assert_eq!(expected, actual); + } + + #[test] + fn test_rename_sink_refs() { + let definition = + "CREATE SINK sink_t FROM foo WITH (connector = 'kafka', format = 'append_only')"; + let from = "foo"; + let to = "bar"; + let expected = + "CREATE SINK sink_t FROM bar WITH (connector = 
'kafka', format = 'append_only')"; + let actual = alter_relation_rename_refs(definition, from, to); + assert_eq!(expected, actual); + } + + #[test] + fn test_rename_with_alias_refs() { + let definition = + "CREATE MATERIALIZED VIEW mv1 AS SELECT foo.v1 AS m1v, foo.v2 AS m2v FROM foo"; + let from = "foo"; + let to = "bar"; + let expected = + "CREATE MATERIALIZED VIEW mv1 AS SELECT foo.v1 AS m1v, foo.v2 AS m2v FROM bar AS foo"; + let actual = alter_relation_rename_refs(definition, from, to); + assert_eq!(expected, actual); + + let definition = "CREATE MATERIALIZED VIEW mv1 AS SELECT foo.v1 AS m1v, (foo.v2).v3 AS m2v FROM foo WHERE foo.v1 = 1 AND (foo.v2).v3 IS TRUE"; + let expected = "CREATE MATERIALIZED VIEW mv1 AS SELECT foo.v1 AS m1v, (foo.v2).v3 AS m2v FROM bar AS foo WHERE foo.v1 = 1 AND (foo.v2).v3 IS TRUE"; + let actual = alter_relation_rename_refs(definition, from, to); + assert_eq!(expected, actual); + + let definition = "CREATE MATERIALIZED VIEW mv1 AS SELECT bar.v1 AS m1v, (bar.v2).v3 AS m2v FROM foo AS bar WHERE bar.v1 = 1"; + let expected = "CREATE MATERIALIZED VIEW mv1 AS SELECT bar.v1 AS m1v, (bar.v2).v3 AS m2v FROM bar AS bar WHERE bar.v1 = 1"; + let actual = alter_relation_rename_refs(definition, from, to); + assert_eq!(expected, actual); + } +} diff --git a/src/meta/src/controller/system_param.rs b/src/meta/src/controller/system_param.rs new file mode 100644 index 0000000000000..0656da5ea9a46 --- /dev/null +++ b/src/meta/src/controller/system_param.rs @@ -0,0 +1,316 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; +use std::time::Duration; + +use anyhow::anyhow; +use risingwave_common::system_param::reader::SystemParamsReader; +use risingwave_common::system_param::{ + check_missing_params, derive_missing_fields, set_system_param, +}; +use risingwave_common::{for_all_params, key_of}; +use risingwave_pb::meta::subscribe_response::{Info, Operation}; +use risingwave_pb::meta::PbSystemParams; +use sea_orm::{ActiveModelTrait, ActiveValue, DatabaseConnection, EntityTrait, TransactionTrait}; +use tokio::sync::oneshot::Sender; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use tracing::info; + +use crate::controller::SqlMetaStore; +use crate::manager::{LocalNotification, NotificationManagerRef}; +use crate::model_v2::prelude::SystemParameter; +use crate::model_v2::system_parameter; +use crate::{MetaError, MetaResult}; + +pub type SystemParamsControllerRef = Arc; + +pub struct SystemParamsController { + db: DatabaseConnection, + // Notify workers and local subscribers of parameter change. + notification_manager: NotificationManagerRef, + // Cached parameters. + params: RwLock, +} + +/// Derive system params from db models. +macro_rules! impl_system_params_from_db { + ($({ $field:ident, $type:ty, $default:expr, $is_mutable:expr },)*) => { + /// Try to deserialize deprecated fields as well. + /// Warn if there are unrecognized fields. 
+ pub fn system_params_from_db(mut models: Vec) -> MetaResult { + let mut params = PbSystemParams::default(); + models.retain(|model| { + match model.name.as_str() { + $( + key_of!($field) => { + params.$field = Some(model.value.parse::<$type>().unwrap()); + false + } + )* + _ => true, + } + }); + derive_missing_fields(&mut params); + if !models.is_empty() { + let unrecognized_params = models.into_iter().map(|model| model.name).collect::>(); + tracing::warn!("unrecognized system params {:?}", unrecognized_params); + } + Ok(params) + } + }; +} + +/// Derive serialization to db models. +macro_rules! impl_system_params_to_models { + ($({ $field:ident, $type:ty, $default:expr, $is_mutable:expr },)*) => { + #[allow(clippy::vec_init_then_push)] + pub fn system_params_to_model(params: &PbSystemParams) -> MetaResult> { + check_missing_params(params).map_err(|e| anyhow!(e))?; + let mut models = Vec::new(); + $( + let value = params.$field.as_ref().unwrap().to_string(); + models.push(system_parameter::ActiveModel { + name: ActiveValue::Set(key_of!($field).to_string()), + value: ActiveValue::Set(value), + is_mutable: ActiveValue::Set($is_mutable), + description: ActiveValue::Set(None), + }); + )* + Ok(models) + } + }; +} + +// For each field in `persisted` and `init` +// 1. Some, None: The persisted field is deprecated, so just ignore it. +// 2. Some, Some: Check equality and warn if they differ. +// 3. None, Some: A new version of RW cluster is launched for the first time and newly introduced +// params are not set. Use init value. +// 4. None, None: A new version of RW cluster is launched for the first time and newly introduced +// params are not set. The new field is not initialized either, just leave it as `None`. +macro_rules! 
impl_merge_params { + ($({ $field:ident, $type:ty, $default:expr, $is_mutable:expr },)*) => { + fn merge_params(mut persisted: PbSystemParams, init: PbSystemParams) -> PbSystemParams { + $( + match (persisted.$field.as_ref(), init.$field) { + (Some(persisted), Some(init)) => { + if persisted != &init { + tracing::warn!( + "The initializing value of \"{:?}\" ({}) differ from persisted ({}), using persisted value", + key_of!($field), + init, + persisted + ); + } + }, + (None, Some(init)) => persisted.$field = Some(init), + _ => {}, + } + )* + persisted + } + }; +} + +for_all_params!(impl_system_params_from_db); +for_all_params!(impl_merge_params); +for_all_params!(impl_system_params_to_models); + +impl SystemParamsController { + pub async fn new( + sql_meta_store: SqlMetaStore, + notification_manager: NotificationManagerRef, + init_params: PbSystemParams, + ) -> MetaResult { + let db = sql_meta_store.conn; + let params = SystemParameter::find().all(&db).await?; + let params = merge_params(system_params_from_db(params)?, init_params); + + info!("system parameters: {:?}", params); + check_missing_params(¶ms).map_err(|e| anyhow!(e))?; + + let ctl = Self { + db, + notification_manager, + params: RwLock::new(params), + }; + // flush to db. + ctl.flush_params().await?; + + Ok(ctl) + } + + pub async fn get_pb_params(&self) -> PbSystemParams { + self.params.read().await.clone() + } + + pub async fn get_params(&self) -> SystemParamsReader { + self.params.read().await.clone().into() + } + + async fn flush_params(&self) -> MetaResult<()> { + let params = self.params.read().await; + let models = system_params_to_model(¶ms)?; + let txn = self.db.begin().await?; + // delete all params first and then insert all params. It follows the same logic + // as the old code, we'd better change it to another way later to keep consistency. 
+ SystemParameter::delete_many().exec(&txn).await?; + + for model in models { + model.insert(&txn).await?; + } + txn.commit().await?; + Ok(()) + } + + pub async fn set_param(&self, name: &str, value: Option) -> MetaResult { + let mut params_guard = self.params.write().await; + + let Some(param) = SystemParameter::find_by_id(name.to_string()) + .one(&self.db) + .await? + else { + return Err(MetaError::system_param(format!( + "unrecognized system parameter {}", + name + ))); + }; + let mut params = params_guard.clone(); + let mut param: system_parameter::ActiveModel = param.into(); + param.value = ActiveValue::Set( + set_system_param(&mut params, name, value).map_err(MetaError::system_param)?, + ); + param.update(&self.db).await?; + *params_guard = params.clone(); + + // Sync params to other managers on the meta node only once, since it's infallible. + self.notification_manager + .notify_local_subscribers(LocalNotification::SystemParamsChange(params.clone().into())) + .await; + + // Sync params to worker nodes. + self.notify_workers(¶ms).await; + + Ok(params) + } + + // Periodically sync params to worker nodes. + pub fn start_params_notifier( + system_params_controller: Arc, + ) -> (JoinHandle<()>, Sender<()>) { + const NOTIFY_INTERVAL: Duration = Duration::from_millis(5000); + + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); + let join_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(NOTIFY_INTERVAL); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + loop { + tokio::select! { + _ = interval.tick() => {}, + _ = &mut shutdown_rx => { + tracing::info!("System params notifier is stopped"); + return; + } + } + system_params_controller + .notify_workers(&*system_params_controller.params.read().await) + .await; + } + }); + + (join_handle, shutdown_tx) + } + + // Notify workers of parameter change. 
+ async fn notify_workers(&self, params: &PbSystemParams) { + self.notification_manager + .notify_frontend(Operation::Update, Info::SystemParams(params.clone())) + .await; + self.notification_manager + .notify_compute(Operation::Update, Info::SystemParams(params.clone())) + .await; + self.notification_manager + .notify_compactor(Operation::Update, Info::SystemParams(params.clone())) + .await; + } +} + +#[cfg(test)] +mod tests { + use risingwave_common::system_param::system_params_for_test; + + use super::*; + use crate::manager::MetaSrvEnv; + + #[tokio::test] + #[cfg(not(madsim))] + async fn test_system_params() { + let env = MetaSrvEnv::for_test().await; + let meta_store = env.sql_meta_store().unwrap(); + let init_params = system_params_for_test(); + + // init system parameter controller as first launch. + let system_param_ctl = SystemParamsController::new( + meta_store.clone(), + env.notification_manager_ref(), + init_params.clone(), + ) + .await + .unwrap(); + let params = system_param_ctl.get_pb_params().await; + assert_eq!(params, system_params_for_test()); + + // set parameter. + let new_params = system_param_ctl + .set_param("pause_on_next_bootstrap", Some("true".into())) + .await + .unwrap(); + + // insert deprecated params. + let deprecated_param = system_parameter::ActiveModel { + name: ActiveValue::Set("deprecated_param".into()), + value: ActiveValue::Set("foo".into()), + is_mutable: ActiveValue::Set(true), + description: ActiveValue::Set(None), + }; + deprecated_param.insert(&system_param_ctl.db).await.unwrap(); + + // init system parameter controller as not first launch. + let system_param_ctl = SystemParamsController::new( + meta_store, + env.notification_manager_ref(), + init_params.clone(), + ) + .await + .unwrap(); + // check deprecated params are cleaned up. + assert!(SystemParameter::find_by_id("deprecated_param".to_string()) + .one(&system_param_ctl.db) + .await + .unwrap() + .is_none()); + // check new params are set. 
+ let params = system_param_ctl.get_pb_params().await; + assert_eq!(params, new_params); + // check db consistency. + let models = SystemParameter::find() + .all(&system_param_ctl.db) + .await + .unwrap(); + let db_params = system_params_from_db(models).unwrap(); + assert_eq!(db_params, new_params); + } +} diff --git a/src/meta/src/controller/utils.rs b/src/meta/src/controller/utils.rs new file mode 100644 index 0000000000000..d36918db3820d --- /dev/null +++ b/src/meta/src/controller/utils.rs @@ -0,0 +1,356 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; +use model_migration::WithQuery; +use risingwave_pb::catalog::{PbConnection, PbFunction}; +use sea_orm::sea_query::{ + Alias, CommonTableExpression, Expr, Query, QueryStatementBuilder, SelectStatement, UnionType, + WithClause, +}; +use sea_orm::{ + ColumnTrait, ConnectionTrait, DerivePartialModel, EntityTrait, FromQueryResult, JoinType, + Order, PaginatorTrait, QueryFilter, QuerySelect, RelationTrait, Statement, +}; + +use crate::model_v2::object::ObjectType; +use crate::model_v2::prelude::*; +use crate::model_v2::{ + connection, function, index, object, object_dependency, schema, sink, source, table, view, + DataTypeArray, DatabaseId, ObjectId, SchemaId, UserId, +}; +use crate::{MetaError, MetaResult}; + +/// This function will construct a query using recursive cte to find all objects[(id, `obj_type`)] that are used by the given object. 
+/// +/// # Examples +/// +/// ``` +/// use risingwave_meta::controller::utils::construct_obj_dependency_query; +/// use sea_orm::sea_query::*; +/// use sea_orm::*; +/// +/// let query = construct_obj_dependency_query(1); +/// +/// assert_eq!( +/// query.to_string(MysqlQueryBuilder), +/// r#"WITH RECURSIVE `used_by_object_ids` (`used_by`) AS (SELECT `used_by` FROM `object_dependency` WHERE `object_dependency`.`oid` = 1 UNION ALL (SELECT `object_dependency`.`used_by` FROM `object_dependency` INNER JOIN `used_by_object_ids` ON `used_by_object_ids`.`used_by` = `oid`)) SELECT DISTINCT `oid`, `obj_type`, `schema_id`, `database_id` FROM `used_by_object_ids` INNER JOIN `object` ON `used_by_object_ids`.`used_by` = `oid` ORDER BY `oid` DESC"# +/// ); +/// assert_eq!( +/// query.to_string(PostgresQueryBuilder), +/// r#"WITH RECURSIVE "used_by_object_ids" ("used_by") AS (SELECT "used_by" FROM "object_dependency" WHERE "object_dependency"."oid" = 1 UNION ALL (SELECT "object_dependency"."used_by" FROM "object_dependency" INNER JOIN "used_by_object_ids" ON "used_by_object_ids"."used_by" = "oid")) SELECT DISTINCT "oid", "obj_type", "schema_id", "database_id" FROM "used_by_object_ids" INNER JOIN "object" ON "used_by_object_ids"."used_by" = "oid" ORDER BY "oid" DESC"# +/// ); +/// assert_eq!( +/// query.to_string(SqliteQueryBuilder), +/// r#"WITH RECURSIVE "used_by_object_ids" ("used_by") AS (SELECT "used_by" FROM "object_dependency" WHERE "object_dependency"."oid" = 1 UNION ALL SELECT "object_dependency"."used_by" FROM "object_dependency" INNER JOIN "used_by_object_ids" ON "used_by_object_ids"."used_by" = "oid") SELECT DISTINCT "oid", "obj_type", "schema_id", "database_id" FROM "used_by_object_ids" INNER JOIN "object" ON "used_by_object_ids"."used_by" = "oid" ORDER BY "oid" DESC"# +/// ); +/// ``` +pub fn construct_obj_dependency_query(obj_id: ObjectId) -> WithQuery { + let cte_alias = Alias::new("used_by_object_ids"); + let cte_return_alias = Alias::new("used_by"); + + let mut 
base_query = SelectStatement::new() + .column(object_dependency::Column::UsedBy) + .from(ObjectDependency) + .and_where(object_dependency::Column::Oid.eq(obj_id)) + .to_owned(); + + let cte_referencing = Query::select() + .column((ObjectDependency, object_dependency::Column::UsedBy)) + .from(ObjectDependency) + .inner_join( + cte_alias.clone(), + Expr::col((cte_alias.clone(), cte_return_alias.clone())) + .equals(object_dependency::Column::Oid), + ) + .to_owned(); + + let common_table_expr = CommonTableExpression::new() + .query(base_query.union(UnionType::All, cte_referencing).to_owned()) + .column(cte_return_alias.clone()) + .table_name(cte_alias.clone()) + .to_owned(); + + SelectStatement::new() + .distinct() + .columns([ + object::Column::Oid, + object::Column::ObjType, + object::Column::SchemaId, + object::Column::DatabaseId, + ]) + .from(cte_alias.clone()) + .inner_join( + Object, + Expr::col((cte_alias, cte_return_alias.clone())).equals(object::Column::Oid), + ) + .order_by(object::Column::Oid, Order::Desc) + .to_owned() + .with( + WithClause::new() + .recursive(true) + .cte(common_table_expr) + .to_owned(), + ) + .to_owned() +} + +#[derive(Clone, DerivePartialModel, FromQueryResult)] +#[sea_orm(entity = "Object")] +pub struct PartialObject { + pub oid: ObjectId, + pub obj_type: ObjectType, + pub schema_id: Option, + pub database_id: Option, +} + +/// List all objects that are using the given one in a cascade way. It runs a recursive CTE to find all the dependencies. 
+pub async fn get_referring_objects_cascade( + obj_id: ObjectId, + db: &C, +) -> MetaResult> +where + C: ConnectionTrait, +{ + let query = construct_obj_dependency_query(obj_id); + let (sql, values) = query.build_any(&*db.get_database_backend().get_query_builder()); + let objects = PartialObject::find_by_statement(Statement::from_sql_and_values( + db.get_database_backend(), + sql, + values, + )) + .all(db) + .await?; + Ok(objects) +} + +/// `ensure_object_id` ensures the existence of target object in the cluster. +pub async fn ensure_object_id( + object_type: ObjectType, + obj_id: ObjectId, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = Object::find_by_id(obj_id).count(db).await?; + if count == 0 { + return Err(MetaError::catalog_id_not_found( + object_type.as_str(), + obj_id, + )); + } + Ok(()) +} + +/// `ensure_user_id` ensures the existence of target user in the cluster. +pub async fn ensure_user_id(user_id: UserId, db: &C) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = User::find_by_id(user_id).count(db).await?; + if count == 0 { + return Err(anyhow!("user {} was concurrently dropped", user_id).into()); + } + Ok(()) +} + +/// `check_function_signature_duplicate` checks whether the function name and its signature is already used in the target namespace. 
+pub async fn check_function_signature_duplicate( + pb_function: &PbFunction, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = Function::find() + .inner_join(Object) + .filter( + object::Column::DatabaseId + .eq(pb_function.database_id as DatabaseId) + .and(object::Column::SchemaId.eq(pb_function.schema_id as SchemaId)) + .and(function::Column::Name.eq(&pb_function.name)) + .and(function::Column::ArgTypes.eq(DataTypeArray(pb_function.arg_types.clone()))), + ) + .count(db) + .await?; + if count > 0 { + assert_eq!(count, 1); + return Err(MetaError::catalog_duplicated("function", &pb_function.name)); + } + Ok(()) +} + +/// `check_connection_name_duplicate` checks whether the connection name is already used in the target namespace. +pub async fn check_connection_name_duplicate( + pb_connection: &PbConnection, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = Connection::find() + .inner_join(Object) + .filter( + object::Column::DatabaseId + .eq(pb_connection.database_id as DatabaseId) + .and(object::Column::SchemaId.eq(pb_connection.schema_id as SchemaId)) + .and(connection::Column::Name.eq(&pb_connection.name)), + ) + .count(db) + .await?; + if count > 0 { + assert_eq!(count, 1); + return Err(MetaError::catalog_duplicated( + "connection", + &pb_connection.name, + )); + } + Ok(()) +} + +/// `check_relation_name_duplicate` checks whether the relation name is already used in the target namespace. +pub async fn check_relation_name_duplicate( + name: &str, + database_id: DatabaseId, + schema_id: SchemaId, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + macro_rules! 
check_duplicated { + ($obj_type:expr, $entity:ident, $table:ident) => { + let count = Object::find() + .inner_join($entity) + .filter( + object::Column::DatabaseId + .eq(Some(database_id)) + .and(object::Column::SchemaId.eq(Some(schema_id))) + .and($table::Column::Name.eq(name)), + ) + .count(db) + .await?; + if count != 0 { + return Err(MetaError::catalog_duplicated($obj_type.as_str(), name)); + } + }; + } + check_duplicated!(ObjectType::Table, Table, table); + check_duplicated!(ObjectType::Source, Source, source); + check_duplicated!(ObjectType::Sink, Sink, sink); + check_duplicated!(ObjectType::Index, Index, index); + check_duplicated!(ObjectType::View, View, view); + + Ok(()) +} + +/// `check_schema_name_duplicate` checks whether the schema name is already used in the target database. +pub async fn check_schema_name_duplicate( + name: &str, + database_id: DatabaseId, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = Object::find() + .inner_join(Schema) + .filter( + object::Column::ObjType + .eq(ObjectType::Schema) + .and(object::Column::DatabaseId.eq(Some(database_id))) + .and(schema::Column::Name.eq(name)), + ) + .count(db) + .await?; + if count != 0 { + return Err(MetaError::catalog_duplicated("schema", name)); + } + + Ok(()) +} + +/// `ensure_object_not_refer` ensures that object are not used by any other ones except indexes. +pub async fn ensure_object_not_refer( + object_type: ObjectType, + object_id: ObjectId, + db: &C, +) -> MetaResult<()> +where + C: ConnectionTrait, +{ + // Ignore indexes. + let count = if object_type == ObjectType::Table { + ObjectDependency::find() + .join( + JoinType::InnerJoin, + object_dependency::Relation::Object1.def(), + ) + .filter( + object_dependency::Column::Oid + .eq(object_id) + .and(object::Column::ObjType.ne(ObjectType::Index)), + ) + .count(db) + .await? + } else { + ObjectDependency::find() + .filter(object_dependency::Column::Oid.eq(object_id)) + .count(db) + .await? 
+ }; + if count != 0 { + return Err(MetaError::permission_denied(format!( + "{} used by {} other objects.", + object_type.as_str(), + count + ))); + } + Ok(()) +} + +/// List all objects that are using the given one. +pub async fn get_referring_objects(object_id: ObjectId, db: &C) -> MetaResult> +where + C: ConnectionTrait, +{ + let objs = ObjectDependency::find() + .filter(object_dependency::Column::Oid.eq(object_id)) + .join( + JoinType::InnerJoin, + object_dependency::Relation::Object1.def(), + ) + .into_partial_model() + .all(db) + .await?; + + Ok(objs) +} + +/// `ensure_schema_empty` ensures that the schema is empty, used by `DROP SCHEMA`. +pub async fn ensure_schema_empty(schema_id: SchemaId, db: &C) -> MetaResult<()> +where + C: ConnectionTrait, +{ + let count = Object::find() + .filter(object::Column::SchemaId.eq(Some(schema_id))) + .count(db) + .await?; + if count != 0 { + return Err(MetaError::permission_denied("schema is not empty".into())); + } + + Ok(()) +} diff --git a/src/meta/src/dashboard/mod.rs b/src/meta/src/dashboard/mod.rs index a08eaa70cf515..6a10dd9c02bb3 100644 --- a/src/meta/src/dashboard/mod.rs +++ b/src/meta/src/dashboard/mod.rs @@ -17,13 +17,14 @@ mod proxy; use std::collections::HashMap; use std::net::SocketAddr; +use std::path::Path as FilePath; use std::sync::Arc; use anyhow::{anyhow, Result}; -use axum::body::Body; +use axum::body::{boxed, Body}; use axum::extract::{Extension, Path}; use axum::http::{Method, StatusCode}; -use axum::response::IntoResponse; +use axum::response::{IntoResponse, Response}; use axum::routing::{get, get_service}; use axum::Router; use hyper::Request; @@ -45,8 +46,7 @@ pub struct DashboardService { pub cluster_manager: ClusterManagerRef, pub fragment_manager: FragmentManagerRef, pub compute_clients: ComputeClientPool, - - // TODO: replace with catalog manager. 
+ pub ui_path: Option, pub meta_store: MetaStoreRef, } @@ -56,11 +56,15 @@ pub(super) mod handlers { use anyhow::Context; use axum::Json; use itertools::Itertools; + use risingwave_common::bail; + use risingwave_common_heap_profiling::COLLAPSED_SUFFIX; use risingwave_pb::catalog::table::TableType; use risingwave_pb::catalog::{Sink, Source, Table}; - use risingwave_pb::common::WorkerNode; + use risingwave_pb::common::{WorkerNode, WorkerType}; use risingwave_pb::meta::{ActorLocation, PbTableFragments}; - use risingwave_pb::monitor_service::StackTraceResponse; + use risingwave_pb::monitor_service::{ + HeapProfilingResponse, ListHeapProfilingResponse, StackTraceResponse, + }; use serde_json::json; use super::*; @@ -75,6 +79,12 @@ pub(super) mod handlers { DashboardError(err.into()) } + impl From for DashboardError { + fn from(value: anyhow::Error) -> Self { + DashboardError(value) + } + } + impl IntoResponse for DashboardError { fn into_response(self) -> axum::response::Response { let mut resp = Json(json!({ @@ -91,12 +101,11 @@ pub(super) mod handlers { Path(ty): Path, Extension(srv): Extension, ) -> Result>> { - use risingwave_pb::common::WorkerType; let mut result = srv .cluster_manager .list_worker_node( - WorkerType::from_i32(ty) - .ok_or_else(|| anyhow!("invalid worker type")) + WorkerType::try_from(ty) + .map_err(|_| anyhow!("invalid worker type")) .map_err(err)?, None, ) @@ -188,6 +197,39 @@ pub(super) mod handlers { Ok(Json(table_fragments)) } + async fn dump_await_tree_inner( + worker_nodes: impl IntoIterator, + compute_clients: &ComputeClientPool, + ) -> Result> { + let mut all = Default::default(); + + fn merge(a: &mut StackTraceResponse, b: StackTraceResponse) { + a.actor_traces.extend(b.actor_traces); + a.rpc_traces.extend(b.rpc_traces); + a.compaction_task_traces.extend(b.compaction_task_traces); + } + + for worker_node in worker_nodes { + let client = compute_clients.get(worker_node).await.map_err(err)?; + let result = 
client.stack_trace().await.map_err(err)?; + + merge(&mut all, result); + } + + Ok(all.into()) + } + + pub async fn dump_await_tree_all( + Extension(srv): Extension, + ) -> Result> { + let worker_nodes = srv + .cluster_manager + .list_worker_node(WorkerType::ComputeNode, None) + .await; + + dump_await_tree_inner(&worker_nodes, &srv.compute_clients).await + } + pub async fn dump_await_tree( Path(worker_id): Path, Extension(srv): Extension, @@ -200,17 +242,95 @@ pub(super) mod handlers { .map_err(err)? .worker_node; + dump_await_tree_inner(std::iter::once(&worker_node), &srv.compute_clients).await + } + + pub async fn heap_profile( + Path(worker_id): Path, + Extension(srv): Extension, + ) -> Result> { + let worker_node = srv + .cluster_manager + .get_worker_by_id(worker_id) + .await + .context("worker node not found") + .map_err(err)? + .worker_node; + let client = srv.compute_clients.get(&worker_node).await.map_err(err)?; - let result = client.stack_trace().await.map_err(err)?; + let result = client.heap_profile("".to_string()).await.map_err(err)?; Ok(result.into()) } + + pub async fn list_heap_profile( + Path(worker_id): Path, + Extension(srv): Extension, + ) -> Result> { + let worker_node = srv + .cluster_manager + .get_worker_by_id(worker_id) + .await + .context("worker node not found") + .map_err(err)? 
+ .worker_node; + + let client = srv.compute_clients.get(&worker_node).await.map_err(err)?; + + let result = client.list_heap_profile().await.map_err(err)?; + Ok(result.into()) + } + + pub async fn analyze_heap( + Path((worker_id, file_path)): Path<(WorkerId, String)>, + Extension(srv): Extension, + ) -> Result { + if srv.ui_path.is_none() { + bail!("Should provide ui_path"); + } + + let file_path = + String::from_utf8(base64_url::decode(&file_path).map_err(err)?).map_err(err)?; + + let file_name = FilePath::new(&file_path) + .file_name() + .unwrap() + .to_string_lossy() + .to_string(); + + let collapsed_file_name = format!("{}.{}", file_name, COLLAPSED_SUFFIX); + + let worker_node = srv + .cluster_manager + .get_worker_by_id(worker_id) + .await + .context("worker node not found") + .map_err(err)? + .worker_node; + + let client = srv.compute_clients.get(&worker_node).await.map_err(err)?; + + let collapsed_bin = client + .analyze_heap(file_path.clone()) + .await + .map_err(err)? + .result; + let collapsed_str = String::from_utf8_lossy(&collapsed_bin).to_string(); + + let response = Response::builder() + .header("Content-Type", "application/octet-stream") + .header("Content-Disposition", collapsed_file_name) + .body(boxed(collapsed_str)); + + response.map_err(err) + } } impl DashboardService { - pub async fn serve(self, ui_path: Option) -> Result<()> { + pub async fn serve(self) -> Result<()> { use handlers::*; + let ui_path = self.ui_path.clone(); let srv = Arc::new(self); let cors_layer = CorsLayer::new() @@ -233,6 +353,13 @@ impl DashboardService { get(prometheus::list_prometheus_actor_back_pressure), ) .route("/monitor/await_tree/:worker_id", get(dump_await_tree)) + .route("/monitor/await_tree/", get(dump_await_tree_all)) + .route("/monitor/dump_heap_profile/:worker_id", get(heap_profile)) + .route( + "/monitor/list_heap_profile/:worker_id", + get(list_heap_profile), + ) + .route("/monitor/analyze/:worker_id/*path", get(analyze_heap)) .layer( 
ServiceBuilder::new() .layer(AddExtensionLayer::new(srv.clone())) diff --git a/src/meta/src/dashboard/prometheus.rs b/src/meta/src/dashboard/prometheus.rs index 49431a29afd65..24709348c7865 100644 --- a/src/meta/src/dashboard/prometheus.rs +++ b/src/meta/src/dashboard/prometheus.rs @@ -134,7 +134,7 @@ pub async fn list_prometheus_actor_back_pressure( ) -> Result> { if let Some(ref client) = srv.prometheus_client { let now = SystemTime::now(); - let back_pressure_query = "rate(stream_actor_output_buffer_blocking_duration_ns{job=~\"compute\"}[60s]) / 1000000000"; + let back_pressure_query = "avg(rate(stream_actor_output_buffer_blocking_duration_ns[60s])) by (fragment_id, downstream_fragment_id) / 1000000000"; let result = client .query_range( back_pressure_query, diff --git a/src/meta/src/error.rs b/src/meta/src/error.rs index b6c86b3f1eec0..03323d53fa0af 100644 --- a/src/meta/src/error.rs +++ b/src/meta/src/error.rs @@ -55,6 +55,9 @@ enum MetaErrorInner { #[error("{0} id not found: {1}")] CatalogIdNotFound(&'static str, u32), + #[error("table_fragment not exist: id={0}")] + FragmentNotFound(u32), + #[error("{0} with name {1} exists")] Duplicated(&'static str, String), @@ -62,7 +65,7 @@ enum MetaErrorInner { Unavailable(String), #[error("Election failed: {0}")] - Election(etcd_client::Error), + Election(String), #[error("Cancelled: {0}")] Cancelled(String), @@ -73,7 +76,7 @@ enum MetaErrorInner { #[error("Sink error: {0}")] Sink(SinkError), - #[error("AWS SDK error: {}", DisplayErrorContext(&**.0))] + #[error("AWS SDK error: {}", DisplayErrorContext(& * *.0))] Aws(BoxedError), #[error(transparent)] @@ -134,6 +137,14 @@ impl MetaError { MetaErrorInner::CatalogIdNotFound(relation, id.into()).into() } + pub fn fragment_not_found>(id: T) -> Self { + MetaErrorInner::FragmentNotFound(id.into()).into() + } + + pub fn is_fragment_not_found(&self) -> bool { + matches!(self.inner.as_ref(), &MetaErrorInner::FragmentNotFound(..)) + } + pub fn catalog_duplicated>(relation: 
&'static str, name: T) -> Self { MetaErrorInner::Duplicated(relation, name.into()).into() } @@ -165,7 +176,7 @@ impl From for MetaError { impl From for MetaError { fn from(e: etcd_client::Error) -> Self { - MetaErrorInner::Election(e).into() + MetaErrorInner::Election(e.to_string()).into() } } diff --git a/src/meta/src/hummock/compaction/compaction_config.rs b/src/meta/src/hummock/compaction/compaction_config.rs index 13d568b3eaa37..4dfd0edc62a13 100644 --- a/src/meta/src/hummock/compaction/compaction_config.rs +++ b/src/meta/src/hummock/compaction/compaction_config.rs @@ -65,6 +65,7 @@ impl CompactionConfigBuilder { level0_overlapping_sub_level_compact_level_count: compaction_config::level0_overlapping_sub_level_compact_level_count(), tombstone_reclaim_ratio: compaction_config::tombstone_reclaim_ratio(), + enable_emergency_picker: compaction_config::enable_emergency_picker(), }, } } diff --git a/src/meta/src/hummock/compaction/mod.rs b/src/meta/src/hummock/compaction/mod.rs index a30e4b0422111..a056414034243 100644 --- a/src/meta/src/hummock/compaction/mod.rs +++ b/src/meta/src/hummock/compaction/mod.rs @@ -15,41 +15,36 @@ #![expect(clippy::arc_with_non_send_sync, reason = "FIXME: later")] pub mod compaction_config; -mod level_selector; mod overlap_strategy; -mod tombstone_compaction_selector; use risingwave_common::catalog::TableOption; -use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::prost_key_range::KeyRangeExt; -use risingwave_pb::hummock::compact_task::{self, TaskStatus}; +use risingwave_pb::hummock::compact_task::{self, TaskStatus, TaskType}; mod picker; +pub mod selector; + use std::collections::{HashMap, HashSet}; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use picker::{LevelCompactionPicker, ManualCompactionPicker, TierCompactionPicker}; +use picker::{LevelCompactionPicker, TierCompactionPicker}; use risingwave_hummock_sdk::{ - can_concat, CompactionGroupId, HummockCompactionTaskId, HummockEpoch, 
HummockSstableId, + can_concat, CompactionGroupId, HummockCompactionTaskId, HummockEpoch, }; use risingwave_pb::hummock::compaction_config::CompactionMode; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::{CompactTask, CompactionConfig, KeyRange, LevelType}; +pub use selector::CompactionSelector; -pub use crate::hummock::compaction::level_selector::{ - default_level_selector, DynamicLevelSelector, DynamicLevelSelectorCore, LevelSelector, - ManualCompactionSelector, SpaceReclaimCompactionSelector, TtlCompactionSelector, -}; +use self::selector::LocalSelectorStatistic; use crate::hummock::compaction::overlap_strategy::{OverlapStrategy, RangeOverlapStrategy}; -use crate::hummock::compaction::picker::{CompactionInput, LocalPickerStatistic}; -pub use crate::hummock::compaction::tombstone_compaction_selector::TombstoneCompactionSelector; +use crate::hummock::compaction::picker::CompactionInput; use crate::hummock::level_handler::LevelHandler; use crate::hummock::model::CompactionGroup; -use crate::rpc::metrics::MetaMetrics; pub struct CompactStatus { - pub(crate) compaction_group_id: CompactionGroupId, - pub(crate) level_handlers: Vec, + pub compaction_group_id: CompactionGroupId, + pub level_handlers: Vec, } impl Debug for CompactStatus { @@ -111,7 +106,7 @@ impl CompactStatus { task_id: HummockCompactionTaskId, group: &CompactionGroup, stats: &mut LocalSelectorStatistic, - selector: &mut Box, + selector: &mut Box, table_id_to_options: HashMap, ) -> Option { // When we compact the files, we must make the result of compaction meet the following @@ -161,6 +156,10 @@ impl CompactStatus { } pub fn is_trivial_move_task(task: &CompactTask) -> bool { + if task.task_type() != TaskType::Dynamic && task.task_type() != TaskType::Emergency { + return false; + } + if task.input_ssts.len() == 1 { return task.input_ssts[0].level_idx == 0 && can_concat(&task.input_ssts[0].table_infos); @@ -209,74 +208,6 @@ impl CompactStatus { } } -#[derive(Clone, Debug, 
PartialEq)] -pub struct ManualCompactionOption { - /// Filters out SSTs to pick. Has no effect if empty. - pub sst_ids: Vec, - /// Filters out SSTs to pick. - pub key_range: KeyRange, - /// Filters out SSTs to pick. Has no effect if empty. - pub internal_table_id: HashSet, - /// Input level. - pub level: usize, -} - -impl Default for ManualCompactionOption { - fn default() -> Self { - Self { - sst_ids: vec![], - key_range: KeyRange { - left: vec![], - right: vec![], - right_exclusive: false, - }, - internal_table_id: HashSet::default(), - level: 1, - } - } -} - -#[derive(Default)] -pub struct LocalSelectorStatistic { - skip_picker: Vec<(usize, usize, LocalPickerStatistic)>, -} - -impl LocalSelectorStatistic { - pub fn report_to_metrics(&self, group_id: u64, metrics: &MetaMetrics) { - for (start_level, target_level, stats) in &self.skip_picker { - let level_label = format!("cg{}-{}-to-{}", group_id, start_level, target_level); - if stats.skip_by_write_amp_limit > 0 { - metrics - .compact_skip_frequency - .with_label_values(&[level_label.as_str(), "write-amp"]) - .inc(); - } - if stats.skip_by_count_limit > 0 { - metrics - .compact_skip_frequency - .with_label_values(&[level_label.as_str(), "count"]) - .inc(); - } - if stats.skip_by_pending_files > 0 { - metrics - .compact_skip_frequency - .with_label_values(&[level_label.as_str(), "pending-files"]) - .inc(); - } - if stats.skip_by_overlapping > 0 { - metrics - .compact_skip_frequency - .with_label_values(&[level_label.as_str(), "overlapping"]) - .inc(); - } - metrics - .compact_skip_frequency - .with_label_values(&[level_label.as_str(), "picker"]) - .inc(); - } - } -} - pub fn create_compaction_task( compaction_config: &CompactionConfig, input: CompactionInput, diff --git a/src/meta/src/hummock/compaction/picker/base_level_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/base_level_compaction_picker.rs index c224fbfe6ce55..6e1b33b1935d2 100644 --- 
a/src/meta/src/hummock/compaction/picker/base_level_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/base_level_compaction_picker.rs @@ -131,28 +131,13 @@ impl LevelCompactionPicker { level_handlers: &[LevelHandler], stats: &mut LocalPickerStatistic, ) -> Option { - // TODO: remove this - let l0_size = l0.total_file_size - level_handlers[0].get_pending_file_size(); - let base_level_size = target_level.total_file_size - - level_handlers[target_level.level_idx as usize].get_pending_file_size(); - if l0_size < base_level_size { - stats.skip_by_write_amp_limit += 1; - return None; - } - - // no running base_compaction - let strict_check = level_handlers[0] - .get_pending_tasks() - .iter() - .any(|task| task.target_level != 0); - let overlap_strategy = create_overlap_strategy(self.config.compaction_mode()); let min_compaction_bytes = self.config.sub_level_max_compaction_bytes; let non_overlap_sub_level_picker = NonOverlapSubLevelPicker::new( min_compaction_bytes, // divide by 2 because we need to select files of base level and it need use the other // half quota. 
- std::cmp::min( + std::cmp::max( self.config.max_bytes_for_level_base, self.config.max_compaction_bytes / 2, ), @@ -239,8 +224,7 @@ impl LevelCompactionPicker { &result, ValidationRuleType::ToBase, stats, - ) && strict_check - { + ) { continue; } @@ -256,12 +240,7 @@ pub mod tests { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::compaction::level_selector::tests::{ - generate_l0_nonoverlapping_multi_sublevels, generate_l0_nonoverlapping_sublevels, - generate_l0_overlapping_sublevels, generate_level, generate_table, - push_table_level0_nonoverlapping, push_table_level0_overlapping, - push_tables_level0_nonoverlapping, - }; + use crate::hummock::compaction::selector::tests::*; use crate::hummock::compaction::{CompactionMode, TierCompactionPicker}; fn create_compaction_picker_for_test() -> LevelCompactionPicker { @@ -588,7 +567,8 @@ pub mod tests { // Pick with small max_compaction_bytes results partial sub levels included in input. 
let config = Arc::new( CompactionConfigBuilder::new() - .max_compaction_bytes(50000) + .max_compaction_bytes(100010) + .max_bytes_for_level_base(512) .level0_sub_level_compact_level_count(1) .build(), ); diff --git a/src/meta/src/hummock/compaction/picker/compaction_task_validator.rs b/src/meta/src/hummock/compaction/picker/compaction_task_validator.rs index 4de77467205f7..7452f65d6503a 100644 --- a/src/meta/src/hummock/compaction/picker/compaction_task_validator.rs +++ b/src/meta/src/hummock/compaction/picker/compaction_task_validator.rs @@ -41,7 +41,6 @@ impl CompactionTaskValidator { ValidationRuleType::Tier, Box::new(TierCompactionTaskValidationRule { config: config.clone(), - enable: true, }), ); @@ -49,31 +48,34 @@ impl CompactionTaskValidator { ValidationRuleType::Intra, Box::new(IntraCompactionTaskValidationRule { config: config.clone(), - enable: true, }), ); validation_rules.insert( ValidationRuleType::ToBase, - Box::new(BaseCompactionTaskValidationRule { - config, - enable: true, - }), + Box::new(BaseCompactionTaskValidationRule { config }), ); CompactionTaskValidator { validation_rules } } + pub fn unused() -> Self { + CompactionTaskValidator { + validation_rules: HashMap::default(), + } + } + pub fn valid_compact_task( &self, input: &CompactionInput, picker_type: ValidationRuleType, stats: &mut LocalPickerStatistic, ) -> bool { - self.validation_rules - .get(&picker_type) - .unwrap() - .validate(input, stats) + if let Some(validation_rule) = self.validation_rules.get(&picker_type) { + validation_rule.validate(input, stats) + } else { + true + } } } @@ -83,12 +85,19 @@ pub trait CompactionTaskValidationRule { struct TierCompactionTaskValidationRule { config: Arc, - enable: bool, } impl CompactionTaskValidationRule for TierCompactionTaskValidationRule { fn validate(&self, input: &CompactionInput, stats: &mut LocalPickerStatistic) -> bool { - if !self.enable { + // Limit sstable file count to avoid using too much memory. 
+ let overlapping_max_compact_file_numer = std::cmp::min( + self.config.level0_max_compact_file_number, + MAX_COMPACT_LEVEL_COUNT as u64, + ); + + if input.total_file_count >= overlapping_max_compact_file_numer + || input.input_levels.len() >= MAX_COMPACT_LEVEL_COUNT + { return true; } @@ -99,25 +108,13 @@ impl CompactionTaskValidationRule for TierCompactionTaskValidationRule { * self.config.level0_overlapping_sub_level_compact_level_count as u64, ); - // Limit sstable file count to avoid using too much memory. - let overlapping_max_compact_file_numer = std::cmp::min( - self.config.level0_max_compact_file_number, - MAX_COMPACT_LEVEL_COUNT as u64, - ); - - let waiting_enough_files = { - if input.select_input_size > max_compaction_bytes { - false - } else { - input.total_file_count <= overlapping_max_compact_file_numer - } - }; - // If waiting_enough_files is not satisfied, we will raise the priority of the number of // levels to ensure that we can merge as many sub_levels as possible let tier_sub_level_compact_level_count = self.config.level0_overlapping_sub_level_compact_level_count as usize; - if input.input_levels.len() < tier_sub_level_compact_level_count && waiting_enough_files { + if input.input_levels.len() < tier_sub_level_compact_level_count + && input.select_input_size < max_compaction_bytes + { stats.skip_by_count_limit += 1; return false; } @@ -128,12 +125,13 @@ impl CompactionTaskValidationRule for TierCompactionTaskValidationRule { struct IntraCompactionTaskValidationRule { config: Arc, - enable: bool, } impl CompactionTaskValidationRule for IntraCompactionTaskValidationRule { fn validate(&self, input: &CompactionInput, stats: &mut LocalPickerStatistic) -> bool { - if !self.enable { + if input.total_file_count >= self.config.level0_max_compact_file_number + || input.input_levels.len() >= MAX_COMPACT_LEVEL_COUNT + { return true; } @@ -141,6 +139,7 @@ impl CompactionTaskValidationRule for IntraCompactionTaskValidationRule { 
self.config.level0_sub_level_compact_level_count as usize; if input.input_levels.len() < intra_sub_level_compact_level_count { + stats.skip_by_count_limit += 1; return false; } @@ -163,31 +162,24 @@ impl CompactionTaskValidationRule for IntraCompactionTaskValidationRule { max_level_size * self.config.level0_sub_level_compact_level_count as u64 / 2 >= input.select_input_size; - if is_write_amp_large && input.total_file_count < self.config.level0_max_compact_file_number - { + if is_write_amp_large { stats.skip_by_write_amp_limit += 1; return false; } - if input.input_levels.len() < intra_sub_level_compact_level_count - && input.total_file_count < self.config.level0_max_compact_file_number - { - stats.skip_by_count_limit += 1; - return false; - } - true } } struct BaseCompactionTaskValidationRule { config: Arc, - enable: bool, } impl CompactionTaskValidationRule for BaseCompactionTaskValidationRule { fn validate(&self, input: &CompactionInput, stats: &mut LocalPickerStatistic) -> bool { - if !self.enable { + if input.total_file_count >= self.config.level0_max_compact_file_number + || input.input_levels.len() >= MAX_COMPACT_LEVEL_COUNT + { return true; } diff --git a/src/meta/src/hummock/compaction/picker/emergency_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/emergency_compaction_picker.rs new file mode 100644 index 0000000000000..9866f7b644262 --- /dev/null +++ b/src/meta/src/hummock/compaction/picker/emergency_compaction_picker.rs @@ -0,0 +1,64 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use risingwave_pb::hummock::hummock_version::Levels; +use risingwave_pb::hummock::CompactionConfig; + +use super::{ + CompactionInput, CompactionPicker, CompactionTaskValidator, LevelCompactionPicker, + LocalPickerStatistic, TierCompactionPicker, +}; +use crate::hummock::level_handler::LevelHandler; + +pub struct EmergencyCompactionPicker { + target_level: usize, + config: Arc, +} + +impl EmergencyCompactionPicker { + pub fn new(target_level: usize, config: Arc) -> Self { + Self { + target_level, + config, + } + } + + pub fn pick_compaction( + &self, + levels: &Levels, + level_handlers: &[LevelHandler], + stats: &mut LocalPickerStatistic, + ) -> Option { + let unused_validator = Arc::new(CompactionTaskValidator::unused()); + + let mut base_level_compaction_picker = LevelCompactionPicker::new_with_validator( + self.target_level, + self.config.clone(), + unused_validator.clone(), + ); + + if let Some(ret) = + base_level_compaction_picker.pick_compaction(levels, level_handlers, stats) + { + return Some(ret); + } + + let mut tier_compaction_picker = + TierCompactionPicker::new_with_validator(self.config.clone(), unused_validator); + + tier_compaction_picker.pick_compaction(levels, level_handlers, stats) + } +} diff --git a/src/meta/src/hummock/compaction/picker/intra_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/intra_compaction_picker.rs index 541b93254172b..980c3030a98fb 100644 --- a/src/meta/src/hummock/compaction/picker/intra_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/intra_compaction_picker.rs @@ -260,7 +260,7 @@ pub mod tests { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::compaction::level_selector::tests::{ + use crate::hummock::compaction::selector::tests::{ generate_l0_nonoverlapping_multi_sublevels, 
generate_l0_nonoverlapping_sublevels, generate_l0_overlapping_sublevels, generate_level, generate_table, push_table_level0_overlapping, push_tables_level0_nonoverlapping, @@ -623,53 +623,4 @@ pub mod tests { assert!(is_l0_trivial_move(&ret)); assert_eq!(ret.input_levels[0].table_infos.len(), 1); } - - #[test] - fn test_issue_11154() { - let mut local_stats = LocalPickerStatistic::default(); - let mut l0 = generate_l0_overlapping_sublevels(vec![ - vec![ - generate_table(4, 1, 1, 200, 1), - generate_table(5, 1, 400, 600, 1), - ], - vec![ - generate_table(6, 1, 1, 200, 1), - generate_table(7, 1, 400, 600, 1), - ], - vec![ - generate_table(8, 1, 1, 200, 1), - generate_table(9, 1, 400, 600, 1), - ], - vec![generate_table(10, 1, 1, 600, 1)], - ]); - // We can set level_type only because the input above is valid. - for s in &mut l0.sub_levels { - s.level_type = LevelType::Nonoverlapping as i32; - } - let levels = Levels { - l0: Some(l0), - levels: vec![generate_level(1, vec![generate_table(3, 1, 0, 100000, 1)])], - member_table_ids: vec![1], - ..Default::default() - }; - let levels_handler = vec![LevelHandler::new(0), LevelHandler::new(1)]; - - // Pick with large max_compaction_bytes results all sub levels included in input. - let config = Arc::new( - CompactionConfigBuilder::new() - .max_compaction_bytes(800) - .sub_level_max_compaction_bytes(50000) - .max_bytes_for_level_base(500000) - .level0_sub_level_compact_level_count(1) - .build(), - ); - // Only include sub-level 0 results will violate MAX_WRITE_AMPLIFICATION. - // So all sub-levels are included to make write amplification < MAX_WRITE_AMPLIFICATION. 
- let mut picker = IntraCompactionPicker::new(config); - let ret = picker - .pick_compaction(&levels, &levels_handler, &mut local_stats) - .unwrap(); - // avoid add sst_10 and cause a big task - assert_eq!(3, ret.input_levels.len()); - } } diff --git a/src/meta/src/hummock/compaction/picker/manual_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/manual_compaction_picker.rs index e8f8c908d0fd3..23b1f0b6a9960 100644 --- a/src/meta/src/hummock/compaction/picker/manual_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/manual_compaction_picker.rs @@ -24,7 +24,7 @@ use super::{CompactionInput, CompactionPicker, LocalPickerStatistic}; use crate::hummock::compaction::overlap_strategy::{ OverlapInfo, OverlapStrategy, RangeOverlapInfo, }; -use crate::hummock::compaction::ManualCompactionOption; +use crate::hummock::compaction::selector::ManualCompactionOption; use crate::hummock::level_handler::LevelHandler; pub struct ManualCompactionPicker { @@ -333,12 +333,12 @@ pub mod tests { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::compaction::level_selector::tests::{ + use crate::hummock::compaction::overlap_strategy::RangeOverlapStrategy; + use crate::hummock::compaction::selector::tests::{ assert_compaction_task, generate_l0_nonoverlapping_sublevels, generate_l0_overlapping_sublevels, generate_level, generate_table, }; - use crate::hummock::compaction::level_selector::{LevelSelector, ManualCompactionSelector}; - use crate::hummock::compaction::overlap_strategy::RangeOverlapStrategy; + use crate::hummock::compaction::selector::{CompactionSelector, ManualCompactionSelector}; use crate::hummock::compaction::LocalSelectorStatistic; use crate::hummock::model::CompactionGroup; use crate::hummock::test_utils::iterator_test_key_of_epoch; diff --git a/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs 
b/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs index 0cf44795e0acb..c17fa305be0e4 100644 --- a/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs @@ -209,6 +209,13 @@ impl NonOverlapSubLevelPicker { break; } + // more than 1 sub_level + if ret.total_file_count > 1 && ret.total_file_size >= self.max_compaction_bytes + || ret.total_file_count >= self.max_file_count as usize + { + break; + } + let mut overlap_files_range = overlap_info.check_multiple_include(&target_level.table_infos); if overlap_files_range.is_empty() { @@ -288,15 +295,6 @@ impl NonOverlapSubLevelPicker { .map(|(_, files)| files.len()) .sum::(); - // more than 1 sub_level - if ret.total_file_count > 1 - && (ret.total_file_size + (add_files_size + current_level_size) - >= self.max_compaction_bytes - || ret.total_file_count + add_files_count >= self.max_file_count as usize) - { - break; - } - if ret .sstable_infos .iter() @@ -379,10 +377,10 @@ pub mod tests { pub use risingwave_pb::hummock::{KeyRange, Level, LevelType}; use super::*; - use crate::hummock::compaction::level_selector::tests::{ + use crate::hummock::compaction::overlap_strategy::RangeOverlapStrategy; + use crate::hummock::compaction::selector::tests::{ generate_l0_nonoverlapping_sublevels, generate_table, }; - use crate::hummock::compaction::overlap_strategy::RangeOverlapStrategy; #[test] fn test_compact_l1() { diff --git a/src/meta/src/hummock/compaction/picker/mod.rs b/src/meta/src/hummock/compaction/picker/mod.rs index 15e7a61f548ee..ac1a8f825aa33 100644 --- a/src/meta/src/hummock/compaction/picker/mod.rs +++ b/src/meta/src/hummock/compaction/picker/mod.rs @@ -13,6 +13,7 @@ // limitations under the License. 
mod base_level_compaction_picker; +mod emergency_compaction_picker; mod intra_compaction_picker; mod manual_compaction_picker; mod min_overlap_compaction_picker; @@ -26,6 +27,7 @@ mod compaction_task_validator; pub use base_level_compaction_picker::LevelCompactionPicker; pub use compaction_task_validator::{CompactionTaskValidator, ValidationRuleType}; +pub use emergency_compaction_picker::EmergencyCompactionPicker; pub use intra_compaction_picker::IntraCompactionPicker; pub use manual_compaction_picker::ManualCompactionPicker; pub use min_overlap_compaction_picker::MinOverlappingPicker; @@ -43,7 +45,7 @@ use crate::hummock::level_handler::LevelHandler; pub const MAX_COMPACT_LEVEL_COUNT: usize = 42; -#[derive(Default)] +#[derive(Default, Debug)] pub struct LocalPickerStatistic { pub skip_by_write_amp_limit: u64, pub skip_by_count_limit: u64, @@ -51,7 +53,7 @@ pub struct LocalPickerStatistic { pub skip_by_overlapping: u64, } -#[derive(Default)] +#[derive(Default, Debug)] pub struct CompactionInput { pub input_levels: Vec, pub target_level: usize, diff --git a/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs index a3ff21831fef8..7dc7a4688e644 100644 --- a/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs @@ -104,7 +104,6 @@ impl SpaceReclaimCompactionPicker { } while state.last_level <= levels.levels.len() { let mut is_trivial_task = true; - let mut select_file_size = 0; for sst in &levels.levels[state.last_level - 1].table_infos { let exist_count = self.exist_table_count(sst); let need_reclaim = exist_count < sst.table_ids.len(); @@ -122,15 +121,14 @@ impl SpaceReclaimCompactionPicker { } if !is_trivial_sst { - if !select_input_ssts.is_empty() && is_trivial_task { + if !select_input_ssts.is_empty() { break; } is_trivial_task = false; } 
select_input_ssts.push(sst.clone()); - select_file_size += sst.file_size; - if select_file_size > self.max_space_reclaim_bytes && !is_trivial_task { + if !is_trivial_task { break; } } @@ -174,12 +172,13 @@ mod test { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::compaction::level_selector::tests::{ + use crate::hummock::compaction::selector::tests::{ assert_compaction_task, generate_l0_nonoverlapping_sublevels, generate_level, generate_table_with_ids_and_epochs, }; - use crate::hummock::compaction::level_selector::SpaceReclaimCompactionSelector; - use crate::hummock::compaction::{LevelSelector, LocalSelectorStatistic}; + use crate::hummock::compaction::selector::{ + CompactionSelector, LocalSelectorStatistic, SpaceReclaimCompactionSelector, + }; use crate::hummock::model::CompactionGroup; #[test] diff --git a/src/meta/src/hummock/compaction/picker/tier_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/tier_compaction_picker.rs index 99b17694f528e..a64bf489a197a 100644 --- a/src/meta/src/hummock/compaction/picker/tier_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/tier_compaction_picker.rs @@ -14,8 +14,6 @@ use std::sync::Arc; -use risingwave_hummock_sdk::can_concat; -use risingwave_hummock_sdk::prost_key_range::KeyRangeExt; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::{CompactionConfig, InputLevel, LevelType, OverlappingLevel}; @@ -69,33 +67,11 @@ impl TierCompactionPicker { continue; } - let mut input_level = InputLevel { + let input_level = InputLevel { level_idx: 0, level_type: level.level_type, table_infos: level.table_infos.clone(), }; - // Since the level is overlapping, we can change the order of origin sstable infos in - // task. 
- input_level.table_infos.sort_by(|sst1, sst2| { - let a = sst1.key_range.as_ref().unwrap(); - let b = sst2.key_range.as_ref().unwrap(); - a.compare(b) - }); - - if can_concat(&input_level.table_infos) { - return Some(CompactionInput { - select_input_size: input_level - .table_infos - .iter() - .map(|sst| sst.file_size) - .sum(), - total_file_count: input_level.table_infos.len() as u64, - input_levels: vec![input_level], - target_level: 0, - target_sub_level_id: level.sub_level_id, - ..Default::default() - }); - } let mut select_level_inputs = vec![input_level]; @@ -182,18 +158,17 @@ impl CompactionPicker for TierCompactionPicker { pub mod tests { use std::sync::Arc; - use risingwave_hummock_sdk::can_concat; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::new_sub_level; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::{LevelType, OverlappingLevel}; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::compaction::level_selector::tests::{ - generate_l0_overlapping_sublevels, generate_table, push_table_level0_overlapping, - }; use crate::hummock::compaction::picker::{ CompactionPicker, LocalPickerStatistic, TierCompactionPicker, }; + use crate::hummock::compaction::selector::tests::{ + generate_l0_overlapping_sublevels, generate_table, push_table_level0_overlapping, + }; use crate::hummock::level_handler::LevelHandler; #[test] @@ -281,11 +256,8 @@ pub mod tests { // sub-level 0 is excluded because it's nonoverlapping and violating // sub_level_max_compaction_bytes. 
let mut picker = TierCompactionPicker::new(config); - let ret = picker - .pick_compaction(&levels, &levels_handler, &mut local_stats) - .unwrap(); - assert_eq!(ret.input_levels.len(), 1); - assert!(can_concat(&ret.input_levels[0].table_infos)); + let ret = picker.pick_compaction(&levels, &levels_handler, &mut local_stats); + assert!(ret.is_none()) } #[test] diff --git a/src/meta/src/hummock/compaction/picker/tombstone_reclaim_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/tombstone_reclaim_compaction_picker.rs index 994bfbc5ea557..04d8cb791c881 100644 --- a/src/meta/src/hummock/compaction/picker/tombstone_reclaim_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/tombstone_reclaim_compaction_picker.rs @@ -23,7 +23,6 @@ use crate::hummock::level_handler::LevelHandler; pub struct TombstoneReclaimCompactionPicker { overlap_strategy: Arc, - max_compaction_bytes: u64, delete_ratio: u64, range_delete_ratio: u64, } @@ -36,13 +35,11 @@ pub struct TombstoneReclaimPickerState { impl TombstoneReclaimCompactionPicker { pub fn new( overlap_strategy: Arc, - max_compaction_bytes: u64, delete_ratio: u64, range_delete_ratio: u64, ) -> Self { Self { overlap_strategy, - max_compaction_bytes, delete_ratio, range_delete_ratio, } @@ -55,34 +52,23 @@ impl TombstoneReclaimCompactionPicker { state: &mut TombstoneReclaimPickerState, ) -> Option { assert!(!levels.levels.is_empty()); - let mut select_input_ssts = vec![]; if state.last_level == 0 { state.last_level = 1; } while state.last_level <= levels.levels.len() { - let mut select_file_size = 0; + let mut select_input_ssts = vec![]; for sst in &levels.levels[state.last_level - 1].table_infos { let need_reclaim = (sst.range_tombstone_count * 100 >= sst.total_key_count * self.range_delete_ratio) || (sst.stale_key_count * 100 >= sst.total_key_count * self.delete_ratio); if !need_reclaim || level_handlers[state.last_level].is_pending_compact(&sst.sst_id) { - if !select_input_ssts.is_empty() { - // Our goal is to 
pick as many complete layers of data as possible and keep - // the picked files contiguous to avoid overlapping - // key_ranges, so the strategy is to pick as many - // contiguous files as possible (at least one) - break; - } continue; } select_input_ssts.push(sst.clone()); - select_file_size += sst.file_size; - if select_file_size > self.max_compaction_bytes { - break; - } + break; } // turn to next_round @@ -108,6 +94,7 @@ impl TombstoneReclaimCompactionPicker { } } if pending_compact { + state.last_level += 1; continue; } InputLevel { @@ -151,7 +138,7 @@ pub mod tests { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; use crate::hummock::compaction::create_overlap_strategy; - use crate::hummock::compaction::level_selector::tests::{generate_level, generate_table}; + use crate::hummock::compaction::selector::tests::{generate_level, generate_table}; #[test] fn test_basic() { @@ -180,12 +167,7 @@ pub mod tests { let config = Arc::new(CompactionConfigBuilder::new().build()); let strategy = create_overlap_strategy(config.compaction_mode()); - let picker = TombstoneReclaimCompactionPicker::new( - strategy.clone(), - config.max_compaction_bytes, - 40, - 20, - ); + let picker = TombstoneReclaimCompactionPicker::new(strategy.clone(), 40, 20); let ret = picker.pick_compaction(&levels, &levels_handler, &mut state); assert!(ret.is_none()); let mut sst = generate_table(3, 1, 201, 300, 1); @@ -203,8 +185,7 @@ pub mod tests { sst.range_tombstone_count = 30; sst.total_key_count = 100; levels.levels[0].table_infos.push(sst); - let picker = - TombstoneReclaimCompactionPicker::new(strategy, config.max_compaction_bytes, 50, 10); + let picker = TombstoneReclaimCompactionPicker::new(strategy, 50, 10); let mut state = TombstoneReclaimPickerState::default(); let ret = picker .pick_compaction(&levels, &levels_handler, &mut state) diff --git a/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs 
b/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs index 9f84b99453f17..cc3b3ca41d84f 100644 --- a/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs @@ -61,15 +61,11 @@ impl TtlPickerState { } pub struct TtlReclaimCompactionPicker { - max_ttl_reclaim_bytes: u64, table_id_to_ttl: HashMap, } impl TtlReclaimCompactionPicker { - pub fn new( - max_ttl_reclaim_bytes: u64, - table_id_to_options: HashMap, - ) -> Self { + pub fn new(table_id_to_options: HashMap) -> Self { let table_id_to_ttl: HashMap = table_id_to_options .iter() .filter(|id_to_option| { @@ -79,10 +75,7 @@ impl TtlReclaimCompactionPicker { .map(|id_to_option| (*id_to_option.0, id_to_option.1.retention_seconds.unwrap())) .collect(); - Self { - max_ttl_reclaim_bytes, - table_id_to_ttl, - } + Self { table_id_to_ttl } } fn filter(&self, sst: &SstableInfo, current_epoch_physical_time: u64) -> bool { @@ -154,7 +147,6 @@ impl TtlReclaimCompactionPicker { } let current_epoch_physical_time = Epoch::now().physical_time(); - let mut select_file_size = 0; for sst in &reclaimed_level.table_infos { let unmatched_sst = sst @@ -167,22 +159,11 @@ impl TtlReclaimCompactionPicker { || level_handler.is_pending_compact(&sst.sst_id) || self.filter(sst, current_epoch_physical_time) { - if !select_input_ssts.is_empty() { - // Our goal is to pick as many complete layers of data as possible and keep the - // picked files contiguous to avoid overlapping key_ranges, so the strategy is - // to pick as many contiguous files as possible (at least one) - break; - } - continue; } select_input_ssts.push(sst.clone()); - select_file_size += sst.file_size; - - if select_file_size > self.max_ttl_reclaim_bytes { - break; - } + break; } // turn to next_round @@ -227,11 +208,11 @@ mod test { use super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use 
crate::hummock::compaction::level_selector::tests::{ + use crate::hummock::compaction::selector::tests::{ assert_compaction_task, generate_l0_nonoverlapping_sublevels, generate_level, generate_table_with_ids_and_epochs, }; - use crate::hummock::compaction::level_selector::{LevelSelector, TtlCompactionSelector}; + use crate::hummock::compaction::selector::{CompactionSelector, TtlCompactionSelector}; use crate::hummock::compaction::LocalSelectorStatistic; use crate::hummock::model::CompactionGroup; @@ -402,7 +383,7 @@ mod test { assert_compaction_task(&task, &levels_handler); assert_eq!(task.input.input_levels.len(), 2); assert_eq!(task.input.input_levels[0].level_idx, 4); - assert_eq!(task.input.input_levels[0].table_infos.len(), 5); + assert_eq!(task.input.input_levels[0].table_infos.len(), 1); let mut start_id = 2; for sst in &task.input.input_levels[0].table_infos { @@ -453,9 +434,9 @@ mod test { assert_eq!(task.input.input_levels[0].level_idx, 4); // test select index, picker will select file from state - assert_eq!(task.input.input_levels[0].table_infos.len(), 4); + assert_eq!(task.input.input_levels[0].table_infos.len(), 1); - let mut start_id = 7; + let mut start_id = 3; for sst in &task.input.input_levels[0].table_infos { assert_eq!(start_id, sst.get_sst_id()); start_id += 1; @@ -495,17 +476,6 @@ mod test { assert_eq!(start_id, sst.get_sst_id()); start_id += 1; } - - assert!(selector - .pick_compaction( - 1, - &group_config, - &levels, - &mut levels_handler, - &mut local_stats, - table_id_to_options, - ) - .is_none()) } { @@ -633,8 +603,8 @@ mod test { }, ); - let expect_task_file_count = [3, 2, 1]; - let expect_task_sst_id_range = vec![vec![2, 3, 4], vec![6, 7], vec![10]]; + let expect_task_file_count = [1, 1, 1]; + let expect_task_sst_id_range = vec![vec![2], vec![3], vec![4]]; for (index, x) in expect_task_file_count.iter().enumerate() { // // pick ttl reclaim let task = selector @@ -715,8 +685,8 @@ mod test { }, ); - let expect_task_file_count = [3, 3]; 
- let expect_task_sst_id_range = vec![vec![2, 3, 4], vec![5, 6, 7]]; + let expect_task_file_count = [1, 1]; + let expect_task_sst_id_range = vec![vec![2], vec![3]]; for (index, x) in expect_task_file_count.iter().enumerate() { if index == expect_task_file_count.len() - 1 { table_id_to_options.insert( diff --git a/src/meta/src/hummock/compaction/selector/emergency_selector.rs b/src/meta/src/hummock/compaction/selector/emergency_selector.rs new file mode 100644 index 0000000000000..3f5a81e264956 --- /dev/null +++ b/src/meta/src/hummock/compaction/selector/emergency_selector.rs @@ -0,0 +1,70 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashMap; + +use risingwave_common::catalog::TableOption; +use risingwave_hummock_sdk::HummockCompactionTaskId; +use risingwave_pb::hummock::compact_task; +use risingwave_pb::hummock::hummock_version::Levels; + +use super::{CompactionSelector, DynamicLevelSelectorCore, LocalSelectorStatistic}; +use crate::hummock::compaction::picker::{EmergencyCompactionPicker, LocalPickerStatistic}; +use crate::hummock::compaction::{create_compaction_task, CompactionTask}; +use crate::hummock::level_handler::LevelHandler; +use crate::hummock::model::CompactionGroup; + +#[derive(Default)] +pub struct EmergencySelector {} + +impl CompactionSelector for EmergencySelector { + fn pick_compaction( + &mut self, + task_id: HummockCompactionTaskId, + group: &CompactionGroup, + levels: &Levels, + level_handlers: &mut [LevelHandler], + selector_stats: &mut LocalSelectorStatistic, + _table_id_to_options: HashMap, + ) -> Option { + let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); + let ctx = dynamic_level_core.calculate_level_base_size(levels); + let picker = + EmergencyCompactionPicker::new(ctx.base_level, group.compaction_config.clone()); + + let mut stats = LocalPickerStatistic::default(); + if let Some(compaction_input) = picker.pick_compaction(levels, level_handlers, &mut stats) { + compaction_input.add_pending_task(task_id, level_handlers); + + return Some(create_compaction_task( + group.compaction_config.as_ref(), + compaction_input, + ctx.base_level, + self.task_type(), + )); + } + + selector_stats.skip_picker.push((0, ctx.base_level, stats)); + + None + } + + fn name(&self) -> &'static str { + "EmergencyCompaction" + } + + fn task_type(&self) -> compact_task::TaskType { + compact_task::TaskType::Emergency + } +} diff --git a/src/meta/src/hummock/compaction/level_selector.rs b/src/meta/src/hummock/compaction/selector/level_selector.rs similarity index 65% rename from src/meta/src/hummock/compaction/level_selector.rs rename 
to src/meta/src/hummock/compaction/selector/level_selector.rs index 893dffd79d6bc..7c57b2ecf00d4 100644 --- a/src/meta/src/hummock/compaction/level_selector.rs +++ b/src/meta/src/hummock/compaction/selector/level_selector.rs @@ -25,22 +25,17 @@ use risingwave_hummock_sdk::HummockCompactionTaskId; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::{compact_task, CompactionConfig, LevelType}; -use super::picker::{ - CompactionTaskValidator, IntraCompactionPicker, SpaceReclaimCompactionPicker, - SpaceReclaimPickerState, TtlPickerState, TtlReclaimCompactionPicker, -}; use super::{ - create_compaction_task, LevelCompactionPicker, ManualCompactionOption, ManualCompactionPicker, - TierCompactionPicker, + create_compaction_task, CompactionSelector, LevelCompactionPicker, TierCompactionPicker, }; use crate::hummock::compaction::overlap_strategy::OverlapStrategy; use crate::hummock::compaction::picker::{ - CompactionPicker, LocalPickerStatistic, MinOverlappingPicker, + CompactionPicker, CompactionTaskValidator, IntraCompactionPicker, LocalPickerStatistic, + MinOverlappingPicker, }; use crate::hummock::compaction::{create_overlap_strategy, CompactionTask, LocalSelectorStatistic}; use crate::hummock::level_handler::LevelHandler; use crate::hummock::model::CompactionGroup; -use crate::rpc::metrics::MetaMetrics; pub const SCORE_BASE: u64 = 100; @@ -53,30 +48,23 @@ pub enum PickerType { BottomLevel, } -#[derive(Default, Debug)] -pub struct PickerInfo { - score: u64, - select_level: usize, - target_level: usize, - picker_type: PickerType, +impl ToString for PickerType { + fn to_string(&self) -> String { + match self { + PickerType::Tier => String::from("Tier"), + PickerType::Intra => String::from("Intra"), + PickerType::ToBase => String::from("ToBase"), + PickerType::BottomLevel => String::from("BottomLevel"), + } + } } -pub trait LevelSelector: Sync + Send { - fn pick_compaction( - &mut self, - task_id: HummockCompactionTaskId, - group: 
&CompactionGroup, - levels: &Levels, - level_handlers: &mut [LevelHandler], - selector_stats: &mut LocalSelectorStatistic, - table_id_to_options: HashMap, - ) -> Option; - - fn report_statistic_metrics(&self, _metrics: &MetaMetrics) {} - - fn name(&self) -> &'static str; - - fn task_type(&self) -> compact_task::TaskType; +#[derive(Default, Debug)] +pub struct PickerInfo { + pub score: u64, + pub select_level: usize, + pub target_level: usize, + pub picker_type: PickerType, } #[derive(Default, Debug)] @@ -201,7 +189,11 @@ impl DynamicLevelSelectorCore { ctx } - fn get_priority_levels(&self, levels: &Levels, handlers: &[LevelHandler]) -> SelectContext { + pub(crate) fn get_priority_levels( + &self, + levels: &Levels, + handlers: &[LevelHandler], + ) -> SelectContext { let mut ctx = self.calculate_level_base_size(levels); let idle_file_count = levels @@ -278,7 +270,7 @@ impl DynamicLevelSelectorCore { // Reduce the level num of l0 non-overlapping sub_level ctx.score_levels.push({ PickerInfo { - score: non_overlapping_score, + score: non_overlapping_score + 1, select_level: 0, target_level: ctx.base_level, picker_type: PickerType::ToBase, @@ -409,7 +401,7 @@ impl DynamicLevelSelectorCore { } } -impl LevelSelector for DynamicLevelSelector { +impl CompactionSelector for DynamicLevelSelector { fn pick_compaction( &mut self, task_id: HummockCompactionTaskId, @@ -466,397 +458,26 @@ impl LevelSelector for DynamicLevelSelector { } } -pub struct ManualCompactionSelector { - option: ManualCompactionOption, -} - -impl ManualCompactionSelector { - pub fn new(option: ManualCompactionOption) -> Self { - Self { option } - } -} - -impl LevelSelector for ManualCompactionSelector { - fn pick_compaction( - &mut self, - task_id: HummockCompactionTaskId, - group: &CompactionGroup, - levels: &Levels, - level_handlers: &mut [LevelHandler], - _selector_stats: &mut LocalSelectorStatistic, - _table_id_to_options: HashMap, - ) -> Option { - let dynamic_level_core = 
DynamicLevelSelectorCore::new(group.compaction_config.clone()); - let overlap_strategy = create_overlap_strategy(group.compaction_config.compaction_mode()); - let ctx = dynamic_level_core.calculate_level_base_size(levels); - let (mut picker, base_level) = { - let target_level = if self.option.level == 0 { - ctx.base_level - } else if self.option.level == group.compaction_config.max_level as usize { - self.option.level - } else { - self.option.level + 1 - }; - if self.option.level > 0 && self.option.level < ctx.base_level { - return None; - } - ( - ManualCompactionPicker::new(overlap_strategy, self.option.clone(), target_level), - ctx.base_level, - ) - }; - - let compaction_input = - picker.pick_compaction(levels, level_handlers, &mut LocalPickerStatistic::default())?; - compaction_input.add_pending_task(task_id, level_handlers); - - Some(create_compaction_task( - group.compaction_config.as_ref(), - compaction_input, - base_level, - self.task_type(), - )) - } - - fn name(&self) -> &'static str { - "ManualCompactionSelector" - } - - fn task_type(&self) -> compact_task::TaskType { - compact_task::TaskType::Manual - } -} - -#[derive(Default)] -pub struct SpaceReclaimCompactionSelector { - state: HashMap, -} - -impl LevelSelector for SpaceReclaimCompactionSelector { - fn pick_compaction( - &mut self, - task_id: HummockCompactionTaskId, - group: &CompactionGroup, - levels: &Levels, - level_handlers: &mut [LevelHandler], - _selector_stats: &mut LocalSelectorStatistic, - _table_id_to_options: HashMap, - ) -> Option { - let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); - let mut picker = SpaceReclaimCompactionPicker::new( - group.compaction_config.max_space_reclaim_bytes, - levels.member_table_ids.iter().cloned().collect(), - ); - let ctx = dynamic_level_core.calculate_level_base_size(levels); - let state = self.state.entry(group.group_id).or_default(); - - let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; 
- compaction_input.add_pending_task(task_id, level_handlers); - - Some(create_compaction_task( - dynamic_level_core.get_config(), - compaction_input, - ctx.base_level, - self.task_type(), - )) - } - - fn name(&self) -> &'static str { - "SpaceReclaimCompaction" - } - - fn task_type(&self) -> compact_task::TaskType { - compact_task::TaskType::SpaceReclaim - } -} - -#[derive(Default)] -pub struct TtlCompactionSelector { - state: HashMap, -} - -impl LevelSelector for TtlCompactionSelector { - fn pick_compaction( - &mut self, - task_id: HummockCompactionTaskId, - group: &CompactionGroup, - levels: &Levels, - level_handlers: &mut [LevelHandler], - _selector_stats: &mut LocalSelectorStatistic, - table_id_to_options: HashMap, - ) -> Option { - let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); - let ctx = dynamic_level_core.calculate_level_base_size(levels); - let picker = TtlReclaimCompactionPicker::new( - group.compaction_config.max_space_reclaim_bytes, - table_id_to_options, - ); - let state = self.state.entry(group.group_id).or_default(); - let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; - compaction_input.add_pending_task(task_id, level_handlers); - - Some(create_compaction_task( - group.compaction_config.as_ref(), - compaction_input, - ctx.base_level, - self.task_type(), - )) - } - - fn name(&self) -> &'static str { - "TtlCompaction" - } - - fn task_type(&self) -> compact_task::TaskType { - compact_task::TaskType::Ttl - } -} - -pub fn default_level_selector() -> Box { - Box::::default() -} - #[cfg(test)] pub mod tests { - use std::ops::Range; + use std::collections::HashMap; + use std::sync::Arc; use itertools::Itertools; use risingwave_common::constants::hummock::CompactionFilterFlag; use risingwave_pb::hummock::compaction_config::CompactionMode; - use risingwave_pb::hummock::{KeyRange, Level, LevelType, OverlappingLevel, SstableInfo}; + use risingwave_pb::hummock::hummock_version::Levels; - use 
super::*; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::test_utils::iterator_test_key_of_epoch; - - pub fn push_table_level0_overlapping(levels: &mut Levels, sst: SstableInfo) { - levels.l0.as_mut().unwrap().total_file_size += sst.file_size; - levels.l0.as_mut().unwrap().sub_levels.push(Level { - level_idx: 0, - level_type: LevelType::Overlapping as i32, - total_file_size: sst.file_size, - uncompressed_file_size: sst.uncompressed_file_size, - sub_level_id: sst.get_sst_id(), - table_infos: vec![sst], - }); - } - - pub fn push_table_level0_nonoverlapping(levels: &mut Levels, sst: SstableInfo) { - push_table_level0_overlapping(levels, sst); - levels - .l0 - .as_mut() - .unwrap() - .sub_levels - .last_mut() - .unwrap() - .level_type = LevelType::Nonoverlapping as i32; - } - - pub fn push_tables_level0_nonoverlapping(levels: &mut Levels, table_infos: Vec) { - let total_file_size = table_infos.iter().map(|table| table.file_size).sum::(); - let uncompressed_file_size = table_infos - .iter() - .map(|table| table.uncompressed_file_size) - .sum(); - let sub_level_id = table_infos[0].get_sst_id(); - levels.l0.as_mut().unwrap().total_file_size += total_file_size; - levels.l0.as_mut().unwrap().sub_levels.push(Level { - level_idx: 0, - level_type: LevelType::Nonoverlapping as i32, - total_file_size, - sub_level_id, - table_infos, - uncompressed_file_size, - }); - } - - pub fn generate_table( - id: u64, - table_prefix: u64, - left: usize, - right: usize, - epoch: u64, - ) -> SstableInfo { - SstableInfo { - object_id: id, - sst_id: id, - key_range: Some(KeyRange { - left: iterator_test_key_of_epoch(table_prefix, left, epoch), - right: iterator_test_key_of_epoch(table_prefix, right, epoch), - right_exclusive: false, - }), - file_size: (right - left + 1) as u64, - table_ids: vec![table_prefix as u32], - uncompressed_file_size: (right - left + 1) as u64, - total_key_count: (right - left + 1) as u64, - ..Default::default() - } - } - 
- #[allow(clippy::too_many_arguments)] - pub fn generate_table_with_ids_and_epochs( - id: u64, - table_prefix: u64, - left: usize, - right: usize, - epoch: u64, - table_ids: Vec, - min_epoch: u64, - max_epoch: u64, - ) -> SstableInfo { - SstableInfo { - object_id: id, - sst_id: id, - key_range: Some(KeyRange { - left: iterator_test_key_of_epoch(table_prefix, left, epoch), - right: iterator_test_key_of_epoch(table_prefix, right, epoch), - right_exclusive: false, - }), - file_size: (right - left + 1) as u64, - table_ids, - uncompressed_file_size: (right - left + 1) as u64, - min_epoch, - max_epoch, - ..Default::default() - } - } - - pub fn generate_tables( - ids: Range, - keys: Range, - epoch: u64, - file_size: u64, - ) -> Vec { - let step = (keys.end - keys.start) / (ids.end - ids.start) as usize; - let mut start = keys.start; - let mut tables = vec![]; - for id in ids { - let mut table = generate_table(id, 1, start, start + step - 1, epoch); - table.file_size = file_size; - tables.push(table); - start += step; - } - tables - } - - pub fn generate_level(level_idx: u32, table_infos: Vec) -> Level { - let total_file_size = table_infos.iter().map(|sst| sst.file_size).sum(); - let uncompressed_file_size = table_infos - .iter() - .map(|sst| sst.uncompressed_file_size) - .sum(); - Level { - level_idx, - level_type: LevelType::Nonoverlapping as i32, - table_infos, - total_file_size, - sub_level_id: 0, - uncompressed_file_size, - } - } - - /// Returns a `OverlappingLevel`, with each `table_infos`'s element placed in a nonoverlapping - /// sub-level. 
- pub fn generate_l0_nonoverlapping_sublevels(table_infos: Vec) -> OverlappingLevel { - let total_file_size = table_infos.iter().map(|table| table.file_size).sum::(); - let uncompressed_file_size = table_infos - .iter() - .map(|table| table.uncompressed_file_size) - .sum::(); - OverlappingLevel { - sub_levels: table_infos - .into_iter() - .enumerate() - .map(|(idx, table)| Level { - level_idx: 0, - level_type: LevelType::Nonoverlapping as i32, - total_file_size: table.file_size, - uncompressed_file_size: table.uncompressed_file_size, - sub_level_id: idx as u64, - table_infos: vec![table], - }) - .collect_vec(), - total_file_size, - uncompressed_file_size, - } - } - - pub fn generate_l0_nonoverlapping_multi_sublevels( - table_infos: Vec>, - ) -> OverlappingLevel { - let mut l0 = OverlappingLevel { - sub_levels: table_infos - .into_iter() - .enumerate() - .map(|(idx, table)| Level { - level_idx: 0, - level_type: LevelType::Nonoverlapping as i32, - total_file_size: table.iter().map(|table| table.file_size).sum::(), - uncompressed_file_size: table - .iter() - .map(|sst| sst.uncompressed_file_size) - .sum::(), - sub_level_id: idx as u64, - table_infos: table, - }) - .collect_vec(), - total_file_size: 0, - uncompressed_file_size: 0, - }; - - l0.total_file_size = l0.sub_levels.iter().map(|l| l.total_file_size).sum::(); - l0.uncompressed_file_size = l0 - .sub_levels - .iter() - .map(|l| l.uncompressed_file_size) - .sum::(); - l0 - } - - /// Returns a `OverlappingLevel`, with each `table_infos`'s element placed in a overlapping - /// sub-level. 
- pub fn generate_l0_overlapping_sublevels( - table_infos: Vec>, - ) -> OverlappingLevel { - let mut l0 = OverlappingLevel { - sub_levels: table_infos - .into_iter() - .enumerate() - .map(|(idx, table)| Level { - level_idx: 0, - level_type: LevelType::Overlapping as i32, - total_file_size: table.iter().map(|table| table.file_size).sum::(), - sub_level_id: idx as u64, - table_infos: table.clone(), - uncompressed_file_size: table - .iter() - .map(|sst| sst.uncompressed_file_size) - .sum::(), - }) - .collect_vec(), - total_file_size: 0, - uncompressed_file_size: 0, - }; - l0.total_file_size = l0.sub_levels.iter().map(|l| l.total_file_size).sum::(); - l0.uncompressed_file_size = l0 - .sub_levels - .iter() - .map(|l| l.uncompressed_file_size) - .sum::(); - l0 - } - - pub(crate) fn assert_compaction_task( - compact_task: &CompactionTask, - level_handlers: &[LevelHandler], - ) { - for i in &compact_task.input.input_levels { - for t in &i.table_infos { - assert!(level_handlers[i.level_idx as usize].is_pending_compact(&t.sst_id)); - } - } - } + use crate::hummock::compaction::selector::tests::{ + assert_compaction_task, generate_l0_nonoverlapping_sublevels, generate_level, + generate_tables, push_tables_level0_nonoverlapping, + }; + use crate::hummock::compaction::selector::{ + CompactionSelector, DynamicLevelSelector, DynamicLevelSelectorCore, LocalSelectorStatistic, + }; + use crate::hummock::level_handler::LevelHandler; + use crate::hummock::model::CompactionGroup; #[test] fn test_dynamic_level() { diff --git a/src/meta/src/hummock/compaction/selector/manual_selector.rs b/src/meta/src/hummock/compaction/selector/manual_selector.rs new file mode 100644 index 0000000000000..a00565a9807cd --- /dev/null +++ b/src/meta/src/hummock/compaction/selector/manual_selector.rs @@ -0,0 +1,122 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +use std::collections::{HashMap, HashSet}; + +use risingwave_common::catalog::TableOption; +use risingwave_hummock_sdk::compaction_group::StateTableId; +use risingwave_hummock_sdk::{HummockCompactionTaskId, HummockSstableId}; +use risingwave_pb::hummock::hummock_version::Levels; +use risingwave_pb::hummock::{compact_task, KeyRange}; + +use super::{CompactionSelector, DynamicLevelSelectorCore, LocalSelectorStatistic}; +use crate::hummock::compaction::picker::{ + CompactionPicker, LocalPickerStatistic, ManualCompactionPicker, +}; +use crate::hummock::compaction::{create_compaction_task, create_overlap_strategy, CompactionTask}; +use crate::hummock::level_handler::LevelHandler; +use crate::hummock::model::CompactionGroup; + +#[derive(Clone, Debug, PartialEq)] +pub struct ManualCompactionOption { + /// Filters out SSTs to pick. Has no effect if empty. + pub sst_ids: Vec, + /// Filters out SSTs to pick. + pub key_range: KeyRange, + /// Filters out SSTs to pick. Has no effect if empty. + pub internal_table_id: HashSet, + /// Input level. 
+ pub level: usize, +} + +impl Default for ManualCompactionOption { + fn default() -> Self { + Self { + sst_ids: vec![], + key_range: KeyRange { + left: vec![], + right: vec![], + right_exclusive: false, + }, + internal_table_id: HashSet::default(), + level: 1, + } + } +} + +pub struct ManualCompactionSelector { + option: ManualCompactionOption, +} + +impl ManualCompactionSelector { + pub fn new(option: ManualCompactionOption) -> Self { + Self { option } + } +} + +impl CompactionSelector for ManualCompactionSelector { + fn pick_compaction( + &mut self, + task_id: HummockCompactionTaskId, + group: &CompactionGroup, + levels: &Levels, + level_handlers: &mut [LevelHandler], + _selector_stats: &mut LocalSelectorStatistic, + _table_id_to_options: HashMap, + ) -> Option { + let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); + let overlap_strategy = create_overlap_strategy(group.compaction_config.compaction_mode()); + let ctx = dynamic_level_core.calculate_level_base_size(levels); + let (mut picker, base_level) = { + let target_level = if self.option.level == 0 { + ctx.base_level + } else if self.option.level == group.compaction_config.max_level as usize { + self.option.level + } else { + self.option.level + 1 + }; + if self.option.level > 0 && self.option.level < ctx.base_level { + return None; + } + ( + ManualCompactionPicker::new(overlap_strategy, self.option.clone(), target_level), + ctx.base_level, + ) + }; + + let compaction_input = + picker.pick_compaction(levels, level_handlers, &mut LocalPickerStatistic::default())?; + compaction_input.add_pending_task(task_id, level_handlers); + + Some(create_compaction_task( + group.compaction_config.as_ref(), + compaction_input, + base_level, + self.task_type(), + )) + } + + fn name(&self) -> &'static str { + "ManualCompactionSelector" + } + + fn task_type(&self) -> compact_task::TaskType { + compact_task::TaskType::Manual + } +} diff --git a/src/meta/src/hummock/compaction/selector/mod.rs 
b/src/meta/src/hummock/compaction/selector/mod.rs new file mode 100644 index 0000000000000..ef640b5e611ab --- /dev/null +++ b/src/meta/src/hummock/compaction/selector/mod.rs @@ -0,0 +1,345 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +mod emergency_selector; +pub(crate) mod level_selector; +mod manual_selector; +mod space_reclaim_selector; +mod tombstone_compaction_selector; +mod ttl_selector; + +use std::collections::HashMap; + +pub use emergency_selector::EmergencySelector; +pub use level_selector::{DynamicLevelSelector, DynamicLevelSelectorCore}; +pub use manual_selector::{ManualCompactionOption, ManualCompactionSelector}; +use risingwave_common::catalog::TableOption; +use risingwave_hummock_sdk::HummockCompactionTaskId; +use risingwave_pb::hummock::compact_task; +use risingwave_pb::hummock::hummock_version::Levels; +pub use space_reclaim_selector::SpaceReclaimCompactionSelector; +pub use tombstone_compaction_selector::TombstoneCompactionSelector; +pub use ttl_selector::TtlCompactionSelector; + +use super::picker::LocalPickerStatistic; +use super::{create_compaction_task, LevelCompactionPicker, TierCompactionPicker}; +use crate::hummock::compaction::CompactionTask; +use crate::hummock::level_handler::LevelHandler; +use crate::hummock::model::CompactionGroup; +use crate::rpc::metrics::MetaMetrics; + +pub trait CompactionSelector: Sync + Send { + fn pick_compaction( + &mut self, + task_id: HummockCompactionTaskId, + group: &CompactionGroup, + levels: &Levels, + level_handlers: &mut [LevelHandler], + selector_stats: &mut LocalSelectorStatistic, + table_id_to_options: HashMap, + ) -> Option; + + fn report_statistic_metrics(&self, _metrics: &MetaMetrics) {} + + fn name(&self) -> &'static str; + + fn task_type(&self) -> compact_task::TaskType; +} + +pub fn default_compaction_selector() -> Box { + Box::::default() +} + +#[derive(Default)] +pub struct LocalSelectorStatistic { + skip_picker: Vec<(usize, usize, LocalPickerStatistic)>, +} + +impl LocalSelectorStatistic { + pub fn report_to_metrics(&self, group_id: u64, metrics: &MetaMetrics) { + for (start_level, target_level, stats) in &self.skip_picker { + let level_label = format!("cg{}-{}-to-{}", group_id, start_level, target_level); + if 
stats.skip_by_write_amp_limit > 0 { + metrics + .compact_skip_frequency + .with_label_values(&[level_label.as_str(), "write-amp"]) + .inc(); + } + if stats.skip_by_count_limit > 0 { + metrics + .compact_skip_frequency + .with_label_values(&[level_label.as_str(), "count"]) + .inc(); + } + if stats.skip_by_pending_files > 0 { + metrics + .compact_skip_frequency + .with_label_values(&[level_label.as_str(), "pending-files"]) + .inc(); + } + if stats.skip_by_overlapping > 0 { + metrics + .compact_skip_frequency + .with_label_values(&[level_label.as_str(), "overlapping"]) + .inc(); + } + metrics + .compact_skip_frequency + .with_label_values(&[level_label.as_str(), "picker"]) + .inc(); + } + } +} + +#[cfg(test)] +pub mod tests { + use std::ops::Range; + + use itertools::Itertools; + use risingwave_pb::hummock::{KeyRange, Level, LevelType, OverlappingLevel, SstableInfo}; + + use super::*; + use crate::hummock::test_utils::iterator_test_key_of_epoch; + + pub fn push_table_level0_overlapping(levels: &mut Levels, sst: SstableInfo) { + levels.l0.as_mut().unwrap().total_file_size += sst.file_size; + levels.l0.as_mut().unwrap().sub_levels.push(Level { + level_idx: 0, + level_type: LevelType::Overlapping as i32, + total_file_size: sst.file_size, + uncompressed_file_size: sst.uncompressed_file_size, + sub_level_id: sst.get_sst_id(), + table_infos: vec![sst], + }); + } + + pub fn push_table_level0_nonoverlapping(levels: &mut Levels, sst: SstableInfo) { + push_table_level0_overlapping(levels, sst); + levels + .l0 + .as_mut() + .unwrap() + .sub_levels + .last_mut() + .unwrap() + .level_type = LevelType::Nonoverlapping as i32; + } + + pub fn push_tables_level0_nonoverlapping(levels: &mut Levels, table_infos: Vec) { + let total_file_size = table_infos.iter().map(|table| table.file_size).sum::(); + let uncompressed_file_size = table_infos + .iter() + .map(|table| table.uncompressed_file_size) + .sum(); + let sub_level_id = table_infos[0].get_sst_id(); + 
levels.l0.as_mut().unwrap().total_file_size += total_file_size; + levels.l0.as_mut().unwrap().sub_levels.push(Level { + level_idx: 0, + level_type: LevelType::Nonoverlapping as i32, + total_file_size, + sub_level_id, + table_infos, + uncompressed_file_size, + }); + } + + pub fn generate_table( + id: u64, + table_prefix: u64, + left: usize, + right: usize, + epoch: u64, + ) -> SstableInfo { + SstableInfo { + object_id: id, + sst_id: id, + key_range: Some(KeyRange { + left: iterator_test_key_of_epoch(table_prefix, left, epoch), + right: iterator_test_key_of_epoch(table_prefix, right, epoch), + right_exclusive: false, + }), + file_size: (right - left + 1) as u64, + table_ids: vec![table_prefix as u32], + uncompressed_file_size: (right - left + 1) as u64, + total_key_count: (right - left + 1) as u64, + ..Default::default() + } + } + + #[allow(clippy::too_many_arguments)] + pub fn generate_table_with_ids_and_epochs( + id: u64, + table_prefix: u64, + left: usize, + right: usize, + epoch: u64, + table_ids: Vec, + min_epoch: u64, + max_epoch: u64, + ) -> SstableInfo { + SstableInfo { + object_id: id, + sst_id: id, + key_range: Some(KeyRange { + left: iterator_test_key_of_epoch(table_prefix, left, epoch), + right: iterator_test_key_of_epoch(table_prefix, right, epoch), + right_exclusive: false, + }), + file_size: (right - left + 1) as u64, + table_ids, + uncompressed_file_size: (right - left + 1) as u64, + min_epoch, + max_epoch, + ..Default::default() + } + } + + pub fn generate_tables( + ids: Range, + keys: Range, + epoch: u64, + file_size: u64, + ) -> Vec { + let step = (keys.end - keys.start) / (ids.end - ids.start) as usize; + let mut start = keys.start; + let mut tables = vec![]; + for id in ids { + let mut table = generate_table(id, 1, start, start + step - 1, epoch); + table.file_size = file_size; + tables.push(table); + start += step; + } + tables + } + + pub fn generate_level(level_idx: u32, table_infos: Vec) -> Level { + let total_file_size = 
table_infos.iter().map(|sst| sst.file_size).sum(); + let uncompressed_file_size = table_infos + .iter() + .map(|sst| sst.uncompressed_file_size) + .sum(); + Level { + level_idx, + level_type: LevelType::Nonoverlapping as i32, + table_infos, + total_file_size, + sub_level_id: 0, + uncompressed_file_size, + } + } + + /// Returns a `OverlappingLevel`, with each `table_infos`'s element placed in a nonoverlapping + /// sub-level. + pub fn generate_l0_nonoverlapping_sublevels(table_infos: Vec) -> OverlappingLevel { + let total_file_size = table_infos.iter().map(|table| table.file_size).sum::(); + let uncompressed_file_size = table_infos + .iter() + .map(|table| table.uncompressed_file_size) + .sum::(); + OverlappingLevel { + sub_levels: table_infos + .into_iter() + .enumerate() + .map(|(idx, table)| Level { + level_idx: 0, + level_type: LevelType::Nonoverlapping as i32, + total_file_size: table.file_size, + uncompressed_file_size: table.uncompressed_file_size, + sub_level_id: idx as u64, + table_infos: vec![table], + }) + .collect_vec(), + total_file_size, + uncompressed_file_size, + } + } + + pub fn generate_l0_nonoverlapping_multi_sublevels( + table_infos: Vec>, + ) -> OverlappingLevel { + let mut l0 = OverlappingLevel { + sub_levels: table_infos + .into_iter() + .enumerate() + .map(|(idx, table)| Level { + level_idx: 0, + level_type: LevelType::Nonoverlapping as i32, + total_file_size: table.iter().map(|table| table.file_size).sum::(), + uncompressed_file_size: table + .iter() + .map(|sst| sst.uncompressed_file_size) + .sum::(), + sub_level_id: idx as u64, + table_infos: table, + }) + .collect_vec(), + total_file_size: 0, + uncompressed_file_size: 0, + }; + + l0.total_file_size = l0.sub_levels.iter().map(|l| l.total_file_size).sum::(); + l0.uncompressed_file_size = l0 + .sub_levels + .iter() + .map(|l| l.uncompressed_file_size) + .sum::(); + l0 + } + + /// Returns a `OverlappingLevel`, with each `table_infos`'s element placed in a overlapping + /// sub-level. 
+ pub fn generate_l0_overlapping_sublevels( + table_infos: Vec>, + ) -> OverlappingLevel { + let mut l0 = OverlappingLevel { + sub_levels: table_infos + .into_iter() + .enumerate() + .map(|(idx, table)| Level { + level_idx: 0, + level_type: LevelType::Overlapping as i32, + total_file_size: table.iter().map(|table| table.file_size).sum::(), + sub_level_id: idx as u64, + table_infos: table.clone(), + uncompressed_file_size: table + .iter() + .map(|sst| sst.uncompressed_file_size) + .sum::(), + }) + .collect_vec(), + total_file_size: 0, + uncompressed_file_size: 0, + }; + l0.total_file_size = l0.sub_levels.iter().map(|l| l.total_file_size).sum::(); + l0.uncompressed_file_size = l0 + .sub_levels + .iter() + .map(|l| l.uncompressed_file_size) + .sum::(); + l0 + } + + pub fn assert_compaction_task(compact_task: &CompactionTask, level_handlers: &[LevelHandler]) { + for i in &compact_task.input.input_levels { + for t in &i.table_infos { + assert!(level_handlers[i.level_idx as usize].is_pending_compact(&t.sst_id)); + } + } + } +} diff --git a/src/meta/src/hummock/compaction/selector/space_reclaim_selector.rs b/src/meta/src/hummock/compaction/selector/space_reclaim_selector.rs new file mode 100644 index 0000000000000..48941a4273d66 --- /dev/null +++ b/src/meta/src/hummock/compaction/selector/space_reclaim_selector.rs @@ -0,0 +1,74 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright (c) 2011-present, Facebook, Inc. 
All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +use std::collections::HashMap; + +use risingwave_common::catalog::TableOption; +use risingwave_hummock_sdk::HummockCompactionTaskId; +use risingwave_pb::hummock::compact_task; +use risingwave_pb::hummock::hummock_version::Levels; + +use super::{CompactionSelector, DynamicLevelSelectorCore}; +use crate::hummock::compaction::picker::{SpaceReclaimCompactionPicker, SpaceReclaimPickerState}; +use crate::hummock::compaction::{create_compaction_task, CompactionTask, LocalSelectorStatistic}; +use crate::hummock::level_handler::LevelHandler; +use crate::hummock::model::CompactionGroup; + +#[derive(Default)] +pub struct SpaceReclaimCompactionSelector { + state: HashMap, +} + +impl CompactionSelector for SpaceReclaimCompactionSelector { + fn pick_compaction( + &mut self, + task_id: HummockCompactionTaskId, + group: &CompactionGroup, + levels: &Levels, + level_handlers: &mut [LevelHandler], + _selector_stats: &mut LocalSelectorStatistic, + _table_id_to_options: HashMap, + ) -> Option { + let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); + let mut picker = SpaceReclaimCompactionPicker::new( + group.compaction_config.max_space_reclaim_bytes, + levels.member_table_ids.iter().cloned().collect(), + ); + let ctx = dynamic_level_core.calculate_level_base_size(levels); + let state = self.state.entry(group.group_id).or_default(); + + let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; + compaction_input.add_pending_task(task_id, level_handlers); + + Some(create_compaction_task( + dynamic_level_core.get_config(), + compaction_input, + ctx.base_level, + self.task_type(), + )) + } + + fn name(&self) -> &'static str { + "SpaceReclaimCompaction" + } + + fn task_type(&self) -> compact_task::TaskType { + 
compact_task::TaskType::SpaceReclaim + } +} diff --git a/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs b/src/meta/src/hummock/compaction/selector/tombstone_compaction_selector.rs similarity index 88% rename from src/meta/src/hummock/compaction/tombstone_compaction_selector.rs rename to src/meta/src/hummock/compaction/selector/tombstone_compaction_selector.rs index f6a26dcc13013..505c9b47e30c9 100644 --- a/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs +++ b/src/meta/src/hummock/compaction/selector/tombstone_compaction_selector.rs @@ -19,12 +19,12 @@ use risingwave_hummock_sdk::HummockCompactionTaskId; use risingwave_pb::hummock::compact_task; use risingwave_pb::hummock::hummock_version::Levels; +use super::{CompactionSelector, DynamicLevelSelectorCore}; use crate::hummock::compaction::picker::{ TombstoneReclaimCompactionPicker, TombstoneReclaimPickerState, }; use crate::hummock::compaction::{ - create_compaction_task, create_overlap_strategy, CompactionTask, DynamicLevelSelectorCore, - LevelSelector, LocalSelectorStatistic, + create_compaction_task, create_overlap_strategy, CompactionTask, LocalSelectorStatistic, }; use crate::hummock::level_handler::LevelHandler; use crate::hummock::model::CompactionGroup; @@ -34,7 +34,7 @@ pub struct TombstoneCompactionSelector { state: HashMap, } -impl LevelSelector for TombstoneCompactionSelector { +impl CompactionSelector for TombstoneCompactionSelector { fn pick_compaction( &mut self, task_id: HummockCompactionTaskId, @@ -44,11 +44,15 @@ impl LevelSelector for TombstoneCompactionSelector { _selector_stats: &mut LocalSelectorStatistic, _table_id_to_options: HashMap, ) -> Option { + if group.compaction_config.tombstone_reclaim_ratio == 0 { + // it might cause full-compaction when tombstone_reclaim_ratio == 0 + return None; + } + let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); let ctx = dynamic_level_core.calculate_level_base_size(levels); let picker = 
TombstoneReclaimCompactionPicker::new( create_overlap_strategy(group.compaction_config.compaction_mode()), - group.compaction_config.max_compaction_bytes, group.compaction_config.tombstone_reclaim_ratio as u64, group.compaction_config.tombstone_reclaim_ratio as u64 / 2, ); diff --git a/src/meta/src/hummock/compaction/selector/ttl_selector.rs b/src/meta/src/hummock/compaction/selector/ttl_selector.rs new file mode 100644 index 0000000000000..ded292ef2021e --- /dev/null +++ b/src/meta/src/hummock/compaction/selector/ttl_selector.rs @@ -0,0 +1,70 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +use std::collections::HashMap; + +use risingwave_common::catalog::TableOption; +use risingwave_hummock_sdk::HummockCompactionTaskId; +use risingwave_pb::hummock::compact_task; +use risingwave_pb::hummock::hummock_version::Levels; + +use super::{CompactionSelector, DynamicLevelSelectorCore}; +use crate::hummock::compaction::picker::{TtlPickerState, TtlReclaimCompactionPicker}; +use crate::hummock::compaction::{create_compaction_task, CompactionTask, LocalSelectorStatistic}; +use crate::hummock::level_handler::LevelHandler; +use crate::hummock::model::CompactionGroup; + +#[derive(Default)] +pub struct TtlCompactionSelector { + state: HashMap, +} + +impl CompactionSelector for TtlCompactionSelector { + fn pick_compaction( + &mut self, + task_id: HummockCompactionTaskId, + group: &CompactionGroup, + levels: &Levels, + level_handlers: &mut [LevelHandler], + _selector_stats: &mut LocalSelectorStatistic, + table_id_to_options: HashMap, + ) -> Option { + let dynamic_level_core = DynamicLevelSelectorCore::new(group.compaction_config.clone()); + let ctx = dynamic_level_core.calculate_level_base_size(levels); + let picker = TtlReclaimCompactionPicker::new(table_id_to_options); + let state = self.state.entry(group.group_id).or_default(); + let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; + compaction_input.add_pending_task(task_id, level_handlers); + + Some(create_compaction_task( + group.compaction_config.as_ref(), + compaction_input, + ctx.base_level, + self.task_type(), + )) + } + + fn name(&self) -> &'static str { + "TtlCompaction" + } + + fn task_type(&self) -> compact_task::TaskType { + compact_task::TaskType::Ttl + } +} diff --git a/src/meta/src/hummock/compactor_manager.rs b/src/meta/src/hummock/compactor_manager.rs index c6dd5e2f82387..c3e62d98a190f 100644 --- a/src/meta/src/hummock/compactor_manager.rs +++ b/src/meta/src/hummock/compactor_manager.rs @@ -453,7 +453,7 @@ mod tests { use 
risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; use risingwave_pb::hummock::CompactTaskProgress; - use crate::hummock::compaction::default_level_selector; + use crate::hummock::compaction::selector::default_compaction_selector; use crate::hummock::test_utils::{ add_ssts, register_table_ids_to_compaction_group, setup_compute_env, }; @@ -477,7 +477,7 @@ mod tests { hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() diff --git a/src/meta/src/hummock/manager/checkpoint.rs b/src/meta/src/hummock/manager/checkpoint.rs index d0c8428abedc5..6aa64292b9db1 100644 --- a/src/meta/src/hummock/manager/checkpoint.rs +++ b/src/meta/src/hummock/manager/checkpoint.rs @@ -36,11 +36,11 @@ const HUMMOCK_INIT_FLAG_KEY: &[u8] = b"hummock_init_flag"; impl HummockManager { /// # Panics /// if checkpoint is not found. - pub(crate) async fn read_checkpoint(&self) -> Result { + pub async fn read_checkpoint(&self) -> Result { use prost::Message; let data = match self .object_store - .read(&self.version_checkpoint_path, None) + .read(&self.version_checkpoint_path, ..) 
.await { Ok(data) => data, @@ -173,23 +173,23 @@ impl HummockManager { .map_err(Into::into) } - pub(crate) fn pause_version_checkpoint(&self) { + pub fn pause_version_checkpoint(&self) { self.pause_version_checkpoint.store(true, Ordering::Relaxed); tracing::info!("hummock version checkpoint is paused."); } - pub(crate) fn resume_version_checkpoint(&self) { + pub fn resume_version_checkpoint(&self) { self.pause_version_checkpoint .store(false, Ordering::Relaxed); tracing::info!("hummock version checkpoint is resumed."); } - pub(crate) fn is_version_checkpoint_paused(&self) -> bool { + pub fn is_version_checkpoint_paused(&self) -> bool { self.pause_version_checkpoint.load(Ordering::Relaxed) } #[named] - pub(crate) async fn get_checkpoint_version(&self) -> HummockVersion { + pub async fn get_checkpoint_version(&self) -> HummockVersion { let versioning_guard = read_lock!(self, versioning).await; versioning_guard .checkpoint diff --git a/src/meta/src/hummock/manager/compaction.rs b/src/meta/src/hummock/manager/compaction.rs index db34da26e62fd..3701daaa4c6ee 100644 --- a/src/meta/src/hummock/manager/compaction.rs +++ b/src/meta/src/hummock/manager/compaction.rs @@ -19,6 +19,8 @@ use itertools::Itertools; use risingwave_hummock_sdk::{CompactionGroupId, HummockCompactionTaskId}; use risingwave_pb::hummock::{CompactStatus as PbCompactStatus, CompactTaskAssignment}; +use crate::hummock::compaction::selector::level_selector::PickerInfo; +use crate::hummock::compaction::selector::DynamicLevelSelectorCore; use crate::hummock::compaction::CompactStatus; use crate::hummock::manager::read_lock; use crate::hummock::HummockManager; @@ -71,4 +73,29 @@ impl HummockManager { .collect(), ) } + + #[named] + pub async fn get_compaction_scores( + &self, + compaction_group_id: CompactionGroupId, + ) -> Vec { + let (status, levels, config) = { + let compaction = read_lock!(self, compaction).await; + let versioning = read_lock!(self, versioning).await; + let config_manager = 
self.compaction_group_manager.read().await; + match ( + compaction.compaction_statuses.get(&compaction_group_id), + versioning.current_version.levels.get(&compaction_group_id), + config_manager.try_get_compaction_group_config(compaction_group_id), + ) { + (Some(cs), Some(v), Some(cf)) => (cs.to_owned(), v.to_owned(), cf), + _ => { + return vec![]; + } + } + }; + let dynamic_level_core = DynamicLevelSelectorCore::new(config.compaction_config); + let ctx = dynamic_level_core.get_priority_levels(&levels, &status.level_handlers); + ctx.score_levels + } } diff --git a/src/meta/src/hummock/manager/compaction_group_manager.rs b/src/meta/src/hummock/manager/compaction_group_manager.rs index 8fa1aea32115f..f3853c8d08df5 100644 --- a/src/meta/src/hummock/manager/compaction_group_manager.rs +++ b/src/meta/src/hummock/manager/compaction_group_manager.rs @@ -255,7 +255,7 @@ impl HummockManager { assert!(sst_split_info.is_empty()); let mut trx = Transaction::default(); - new_version_delta.apply_to_txn(&mut trx)?; + new_version_delta.apply_to_txn(&mut trx).await?; self.env.meta_store().txn(trx).await?; versioning.current_version = current_version; new_version_delta.commit(); @@ -350,7 +350,7 @@ impl HummockManager { assert!(sst_split_info.is_empty()); let mut trx = Transaction::default(); - new_version_delta.apply_to_txn(&mut trx)?; + new_version_delta.apply_to_txn(&mut trx).await?; self.env.meta_store().txn(trx).await?; for group_id in &groups_to_remove { let max_level = versioning @@ -386,8 +386,9 @@ impl HummockManager { &self, compaction_group_ids: &[CompactionGroupId], config_to_update: &[MutableConfig], - ) -> Result<()> { - self.compaction_group_manager + ) -> Result> { + let result = self + .compaction_group_manager .write() .await .update_compaction_config( @@ -402,7 +403,7 @@ impl HummockManager { { self.try_update_write_limits(compaction_group_ids).await; } - Ok(()) + Ok(result) } /// Gets complete compaction group info. 
@@ -475,6 +476,7 @@ impl HummockManager { ))); } } + if table_ids.len() == parent_group.member_table_ids.len() { return Err(Error::CompactionGroup(format!( "invalid split attempt for group {}: all member tables are moved", @@ -593,11 +595,13 @@ impl HummockManager { new_compaction_group_id } }; + let mut current_version = versioning.current_version.clone(); let sst_split_info = current_version.apply_version_delta(&new_version_delta); - let mut branched_ssts = BTreeMapTransaction::new(&mut versioning.branched_ssts); + + let mut branched_ssts = BTreeMapTransaction::<'_, _, _>::new(&mut versioning.branched_ssts); let mut trx = Transaction::default(); - new_version_delta.apply_to_txn(&mut trx)?; + new_version_delta.apply_to_txn(&mut trx).await?; if let Some((new_compaction_group_id, config)) = new_group { let mut compaction_group_manager = self.compaction_group_manager.write().await; let insert = BTreeMapEntryTransaction::new_insert( @@ -608,7 +612,7 @@ impl HummockManager { compaction_config: Arc::new(config), }, ); - insert.apply_to_txn(&mut trx)?; + insert.apply_to_txn(&mut trx).await?; self.env.meta_store().txn(trx).await?; insert.commit(); } else { @@ -652,10 +656,17 @@ impl HummockManager { } } } - for mut task in canceled_tasks { - task.set_task_status(TaskStatus::ManualCanceled); + + for task in canceled_tasks { if !self - .report_compact_task_impl(&mut task, &mut compaction_guard, None) + .report_compact_task_impl( + task.task_id, + None, + TaskStatus::ManualCanceled, + vec![], + &mut compaction_guard, + None, + ) .await .unwrap_or(false) { @@ -769,7 +780,7 @@ impl CompactionGroupManager { compaction_groups.insert(*id, new_entry); } let mut trx = Transaction::default(); - compaction_groups.apply_to_txn(&mut trx)?; + compaction_groups.apply_to_txn(&mut trx).await?; meta_store.txn(trx).await?; compaction_groups.commit(); let r = compaction_group_ids @@ -791,13 +802,14 @@ impl CompactionGroupManager { self.default_config.clone() } - async fn 
update_compaction_config( + pub async fn update_compaction_config( &mut self, compaction_group_ids: &[CompactionGroupId], config_to_update: &[MutableConfig], meta_store: &S, - ) -> Result<()> { + ) -> Result> { let mut compaction_groups = BTreeMapTransaction::new(&mut self.compaction_groups); + let mut result = Vec::with_capacity(compaction_group_ids.len()); for compaction_group_id in compaction_group_ids.iter().unique() { let group = compaction_groups.get(compaction_group_id).ok_or_else(|| { Error::CompactionGroup(format!("invalid group {}", *compaction_group_id)) @@ -809,14 +821,15 @@ impl CompactionGroupManager { } let mut new_group = group.clone(); new_group.compaction_config = Arc::new(config); - compaction_groups.insert(*compaction_group_id, new_group); + compaction_groups.insert(*compaction_group_id, new_group.clone()); + result.push(new_group); } let mut trx = Transaction::default(); - compaction_groups.apply_to_txn(&mut trx)?; + compaction_groups.apply_to_txn(&mut trx).await?; meta_store.txn(trx).await?; compaction_groups.commit(); - Ok(()) + Ok(result) } /// Initializes the config for a group. 
@@ -836,7 +849,7 @@ impl CompactionGroupManager { }, ); let mut trx = Transaction::default(); - insert.apply_to_txn(&mut trx)?; + insert.apply_to_txn(&mut trx).await?; meta_store.txn(trx).await?; insert.commit(); Ok(()) @@ -862,7 +875,7 @@ impl CompactionGroupManager { compaction_groups.remove(group); } let mut trx = Transaction::default(); - compaction_groups.apply_to_txn(&mut trx)?; + compaction_groups.apply_to_txn(&mut trx).await?; meta_store.txn(trx).await?; compaction_groups.commit(); Ok(()) @@ -911,6 +924,12 @@ fn update_compaction_config(target: &mut CompactionConfig, items: &[MutableConfi MutableConfig::Level0MaxCompactFileNumber(c) => { target.level0_max_compact_file_number = *c; } + MutableConfig::EnableEmergencyPicker(c) => { + target.enable_emergency_picker = *c; + } + MutableConfig::TombstoneReclaimRatio(c) => { + target.tombstone_reclaim_ratio = *c; + } } } } diff --git a/src/meta/src/hummock/manager/context.rs b/src/meta/src/hummock/manager/context.rs index 21751bb968421..b069a31ce5bd3 100644 --- a/src/meta/src/hummock/manager/context.rs +++ b/src/meta/src/hummock/manager/context.rs @@ -112,7 +112,7 @@ impl HummockManager { Ok(invalid_context_ids) } - pub(crate) async fn commit_epoch_sanity_check( + pub async fn commit_epoch_sanity_check( &self, epoch: HummockEpoch, sstables: &Vec, diff --git a/src/meta/src/hummock/manager/mod.rs b/src/meta/src/hummock/manager/mod.rs index 03d75112d01fe..1b3a284e9ccc9 100644 --- a/src/meta/src/hummock/manager/mod.rs +++ b/src/meta/src/hummock/manager/mod.rs @@ -28,6 +28,7 @@ use futures::stream::{BoxStream, FuturesUnordered}; use futures::{FutureExt, StreamExt}; use itertools::Itertools; use parking_lot::Mutex; +use risingwave_common::config::default::compaction_config; use risingwave_common::monitor::rwlock::MonitoredRwLock; use risingwave_common::util::epoch::{Epoch, INVALID_EPOCH}; use risingwave_common::util::{pending_on_none, select_all}; @@ -44,6 +45,7 @@ use risingwave_hummock_sdk::{ }; use 
risingwave_pb::hummock::compact_task::{self, TaskStatus, TaskType}; use risingwave_pb::hummock::group_delta::DeltaType; +use risingwave_pb::hummock::rise_ctl_update_compaction_config_request::mutable_config; use risingwave_pb::hummock::subscribe_compaction_event_request::{ Event as RequestEvent, HeartBeat, PullTask, ReportTask, }; @@ -54,7 +56,7 @@ use risingwave_pb::hummock::{ version_update_payload, CompactTask, CompactTaskAssignment, CompactionConfig, GroupDelta, HummockPinnedSnapshot, HummockPinnedVersion, HummockSnapshot, HummockVersion, HummockVersionCheckpoint, HummockVersionDelta, HummockVersionDeltas, HummockVersionStats, - IntraLevelDelta, SubscribeCompactionEventRequest, TableOption, + IntraLevelDelta, SstableInfo, SubscribeCompactionEventRequest, TableOption, }; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; @@ -65,9 +67,11 @@ use tokio_stream::wrappers::IntervalStream; use tonic::Streaming; use tracing::warn; -use crate::hummock::compaction::{ - CompactStatus, LocalSelectorStatistic, ManualCompactionOption, TombstoneCompactionSelector, +use crate::hummock::compaction::selector::{ + DynamicLevelSelector, LocalSelectorStatistic, ManualCompactionOption, ManualCompactionSelector, + SpaceReclaimCompactionSelector, TombstoneCompactionSelector, TtlCompactionSelector, }; +use crate::hummock::compaction::CompactStatus; use crate::hummock::error::{Error, Result}; use crate::hummock::metrics_utils::{ trigger_delta_log_stats, trigger_lsm_stat, trigger_mv_stat, trigger_pin_unpin_snapshot_state, @@ -113,14 +117,13 @@ pub struct HummockManager { catalog_manager: CatalogManagerRef, fragment_manager: FragmentManagerRef, - // `CompactionGroupManager` manages `CompactionGroup`'s members. - // Note that all hummock state store user should register to `CompactionGroupManager`. It - // includes all state tables of streaming jobs except sink. 
- compaction_group_manager: tokio::sync::RwLock, - // When trying to locks compaction and versioning at the same time, compaction lock should - // be requested before versioning lock. + /// Lock order: compaction, versioning, compaction_group_manager. + /// - Lock compaction first, then versioning, and finally compaction_group_manager. + /// - This order should be strictly followed to prevent deadlock. compaction: MonitoredRwLock, versioning: MonitoredRwLock, + /// `CompactionGroupManager` manages compaction configs for compaction groups. + compaction_group_manager: tokio::sync::RwLock, latest_snapshot: Snapshot, pub metrics: Arc, @@ -155,7 +158,7 @@ macro_rules! commit_multi_var { let mut trx = $trx_extern_part; // Apply the change in `ValTransaction` to trx $( - $val_txn.apply_to_txn(&mut trx)?; + $val_txn.apply_to_txn(&mut trx).await?; )* // Commit to state store $hummock_mgr.commit_trx($hummock_mgr.env.meta_store(), trx, $context_id) @@ -224,10 +227,6 @@ macro_rules! start_measure_real_process_timer { } pub(crate) use start_measure_real_process_timer; -use super::compaction::{ - DynamicLevelSelector, LevelSelector, ManualCompactionSelector, SpaceReclaimCompactionSelector, - TtlCompactionSelector, -}; use crate::hummock::manager::compaction_group_manager::CompactionGroupManager; use crate::hummock::manager::worker::HummockManagerEventSender; @@ -253,7 +252,7 @@ pub enum CompactionResumeTrigger { } impl HummockManager { - pub(crate) async fn new( + pub async fn new( env: MetaSrvEnv, cluster_manager: ClusterManagerRef, fragment_manager: FragmentManagerRef, @@ -353,7 +352,12 @@ impl HummockManager { if let risingwave_object_store::object::ObjectStoreImpl::S3(s3) = object_store.as_ref() && !env.opts.do_not_config_object_storage_lifecycle { - s3.inner().configure_bucket_lifecycle().await; + let is_bucket_expiration_configured = s3.inner().configure_bucket_lifecycle().await; + if is_bucket_expiration_configured{ + return Err(ObjectError::internal("Cluster cannot start 
with object expiration configured for bucket because RisingWave data will be lost when object expiration kicks in. + Please disable object expiration and restart the cluster.") + .into()); + } } } let checkpoint_path = version_checkpoint_path(state_store_dir); @@ -437,29 +441,16 @@ impl HummockManager { .collect(); let mut redo_state = if self.need_init().await? { - // For backward compatibility, try to read checkpoint from meta store. - let versions = HummockVersion::list(self.env.meta_store()).await?; - let checkpoint_version = if !versions.is_empty() { - let checkpoint = versions.into_iter().next().unwrap(); - tracing::warn!( - "read hummock version checkpoint from meta store: {:#?}", - checkpoint - ); - checkpoint - } else { - // As no record found in stores, create a initial version. - let default_compaction_config = self - .compaction_group_manager - .read() - .await - .default_compaction_config(); - let checkpoint = create_init_version(default_compaction_config); - tracing::info!("init hummock version checkpoint"); - HummockVersionStats::default() - .insert(self.env.meta_store()) - .await?; - checkpoint - }; + let default_compaction_config = self + .compaction_group_manager + .read() + .await + .default_compaction_config(); + let checkpoint_version = create_init_version(default_compaction_config); + tracing::info!("init hummock version checkpoint"); + HummockVersionStats::default() + .insert(self.env.meta_store()) + .await?; versioning_guard.checkpoint = HummockVersionCheckpoint { version: Some(checkpoint_version.clone()), stale_objects: Default::default(), @@ -513,7 +504,7 @@ impl HummockManager { versioning_guard.mark_objects_for_deletion(); let all_group_ids = get_compaction_group_ids(&versioning_guard.current_version); - let configs = self + let mut configs = self .compaction_group_manager .write() .await @@ -522,6 +513,46 @@ impl HummockManager { self.env.meta_store(), ) .await?; + + // We've already lowered the default limit for write limit in 
PR-12183, and to prevent older clusters from continuing to use the outdated configuration, we've introduced a new logic to rewrite it in a uniform way. + let mut rewrite_cg_ids = vec![]; + for (cg_id, compaction_group_config) in &mut configs { + // update write limit + let relaxed_default_write_stop_level_count = 1000; + if compaction_group_config + .compaction_config + .level0_sub_level_compact_level_count + == relaxed_default_write_stop_level_count + { + rewrite_cg_ids.push(*cg_id); + } + } + + if !rewrite_cg_ids.is_empty() { + tracing::info!("Compaction group {:?} configs rewrite ", rewrite_cg_ids); + + // update meta store + let result = self + .compaction_group_manager + .write() + .await + .update_compaction_config( + &rewrite_cg_ids, + &[ + mutable_config::MutableConfig::Level0StopWriteThresholdSubLevelNumber( + compaction_config::level0_stop_write_threshold_sub_level_number(), + ), + ], + self.env.meta_store(), + ) + .await?; + + // update memory + for new_config in result { + configs.insert(new_config.group_id(), new_config); + } + } + versioning_guard.write_limit = calc_new_write_limits(configs, HashMap::new(), &versioning_guard.current_version); trigger_write_stop_stats(&self.metrics, &versioning_guard.write_limit); @@ -776,7 +807,7 @@ impl HummockManager { pub async fn get_compact_task_impl( &self, compaction_group_id: CompactionGroupId, - selector: &mut Box, + selector: &mut Box, ) -> Result> { // TODO: `get_all_table_options` will hold catalog_manager async lock, to avoid holding the // lock in compaction_guard, take out all table_options in advance there may be a @@ -837,7 +868,8 @@ impl HummockManager { return Ok(None); } - let can_trivial_move = matches!(selector.task_type(), compact_task::TaskType::Dynamic); + let can_trivial_move = matches!(selector.task_type(), compact_task::TaskType::Dynamic) + || matches!(selector.task_type(), compact_task::TaskType::Emergency); let mut stats = LocalSelectorStatistic::default(); let member_table_ids = 
¤t_version @@ -863,6 +895,7 @@ impl HummockManager { } Some(task) => task, }; + compact_task.watermark = watermark; compact_task.existing_table_ids = current_version .levels @@ -875,8 +908,15 @@ impl HummockManager { if is_trivial_reclaim { compact_task.set_task_status(TaskStatus::Success); - self.report_compact_task_impl(&mut compact_task, &mut compaction_guard, None) - .await?; + self.report_compact_task_impl( + task_id, + Some(compact_task.clone()), + TaskStatus::Success, + vec![], + &mut compaction_guard, + None, + ) + .await?; tracing::debug!( "TrivialReclaim for compaction group {}: remove {} sstables, cost time: {:?}", compaction_group_id, @@ -888,11 +928,19 @@ impl HummockManager { start_time.elapsed() ); } else if is_trivial_move && can_trivial_move { - compact_task.sorted_output_ssts = compact_task.input_ssts[0].table_infos.clone(); // this task has been finished and `trivial_move_task` does not need to be schedule. compact_task.set_task_status(TaskStatus::Success); - self.report_compact_task_impl(&mut compact_task, &mut compaction_guard, None) - .await?; + compact_task.sorted_output_ssts = compact_task.input_ssts[0].table_infos.clone(); + self.report_compact_task_impl( + task_id, + Some(compact_task.clone()), + TaskStatus::Success, + compact_task.input_ssts[0].table_infos.clone(), + &mut compaction_guard, + None, + ) + .await?; + tracing::debug!( "TrivialMove for compaction group {}: pick up {} sstables in level {} to compact to target_level {} cost time: {:?}", compaction_group_id, @@ -1006,24 +1054,30 @@ impl HummockManager { } /// Cancels a compaction task no matter it's assigned or unassigned. 
- pub async fn cancel_compact_task( - &self, - compact_task: &mut CompactTask, - task_status: TaskStatus, - ) -> Result { - compact_task.set_task_status(task_status); + pub async fn cancel_compact_task(&self, task_id: u64, task_status: TaskStatus) -> Result { fail_point!("fp_cancel_compact_task", |_| Err(Error::MetaStore( anyhow::anyhow!("failpoint metastore err") ))); - self.cancel_compact_task_impl(compact_task).await + self.cancel_compact_task_impl(task_id, task_status).await } #[named] - pub async fn cancel_compact_task_impl(&self, compact_task: &mut CompactTask) -> Result { - assert!(CANCEL_STATUS_SET.contains(&compact_task.task_status())); + pub async fn cancel_compact_task_impl( + &self, + task_id: u64, + task_status: TaskStatus, + ) -> Result { + assert!(CANCEL_STATUS_SET.contains(&task_status)); let mut compaction_guard = write_lock!(self, compaction).await; let ret = self - .report_compact_task_impl(compact_task, &mut compaction_guard, None) + .report_compact_task_impl( + task_id, + None, + task_status, + vec![], + &mut compaction_guard, + None, + ) .await?; #[cfg(test)] { @@ -1055,7 +1109,7 @@ impl HummockManager { pub async fn get_compact_task( &self, compaction_group_id: CompactionGroupId, - selector: &mut Box, + selector: &mut Box, ) -> Result> { fail_point!("fp_get_compact_task", |_| Err(Error::MetaStore( anyhow::anyhow!("failpoint metastore error") @@ -1082,7 +1136,7 @@ impl HummockManager { compaction_group_id: CompactionGroupId, manual_compaction_option: ManualCompactionOption, ) -> Result> { - let mut selector: Box = + let mut selector: Box = Box::new(ManualCompactionSelector::new(manual_compaction_option)); self.get_compact_task(compaction_group_id, &mut selector) .await @@ -1110,19 +1164,21 @@ impl HummockManager { #[named] pub async fn report_compact_task( &self, - compact_task: &mut CompactTask, + task_id: u64, + task_status: TaskStatus, + sorted_output_ssts: Vec, table_stats_change: Option, ) -> Result { let mut guard = write_lock!(self, 
compaction).await; - let ret = self - .report_compact_task_impl(compact_task, &mut guard, table_stats_change) - .await?; - #[cfg(test)] - { - drop(guard); - self.check_state_consistency().await; - } - Ok(ret) + self.report_compact_task_impl( + task_id, + None, + task_status, + sorted_output_ssts, + &mut guard, + table_stats_change, + ) + .await } /// Finishes or cancels a compaction task, according to `task_status`. @@ -1135,7 +1191,10 @@ impl HummockManager { #[named] pub async fn report_compact_task_impl( &self, - compact_task: &mut CompactTask, + task_id: u64, + trivial_move_compact_task: Option, + task_status: TaskStatus, + sorted_output_ssts: Vec, compaction_guard: &mut RwLockWriteGuard<'_, Compaction>, table_stats_change: Option, ) -> Result { @@ -1147,18 +1206,28 @@ impl HummockManager { let mut compact_task_assignment = BTreeMapTransaction::new(&mut compaction.compact_task_assignment); - let is_trivial_reclaim = CompactStatus::is_trivial_reclaim(compact_task); - let is_trivial_move = CompactStatus::is_trivial_move_task(compact_task); - // remove task_assignment - if compact_task_assignment - .remove(compact_task.task_id) - .is_none() - && !(is_trivial_reclaim || is_trivial_move) + let mut compact_task = if let Some(input_task) = trivial_move_compact_task { + input_task + } else { + match compact_task_assignment.remove(task_id) { + Some(compact_task) => compact_task.compact_task.unwrap(), + None => { + tracing::warn!("{}", format!("compact task {} not found", task_id)); + return Ok(false); + } + } + }; + { - return Ok(false); + // apply result + compact_task.set_task_status(task_status); + compact_task.sorted_output_ssts = sorted_output_ssts; } + let is_trivial_reclaim = CompactStatus::is_trivial_reclaim(&compact_task); + let is_trivial_move = CompactStatus::is_trivial_move_task(&compact_task); + { // The compaction task is finished. 
let mut versioning_guard = write_lock!(self, versioning).await; @@ -1173,7 +1242,7 @@ impl HummockManager { match compact_statuses.get_mut(compact_task.compaction_group_id) { Some(mut compact_status) => { - compact_status.report_compact_task(compact_task); + compact_status.report_compact_task(&compact_task); } None => { compact_task.set_task_status(TaskStatus::InvalidGroupCanceled); @@ -1197,7 +1266,7 @@ impl HummockManager { let is_success = if let TaskStatus::Success = compact_task.task_status() { // if member_table_ids changes, the data of sstable may stale. let is_expired = - Self::is_compact_task_expired(compact_task, &versioning.branched_ssts); + Self::is_compact_task_expired(&compact_task, &versioning.branched_ssts); if is_expired { compact_task.set_task_status(TaskStatus::InputOutdatedCanceled); false @@ -1212,7 +1281,7 @@ impl HummockManager { compact_task.set_task_status(TaskStatus::InvalidGroupCanceled); warn!( "The task may be expired because of group split, task:\n {:?}", - compact_task_to_string(compact_task) + compact_task_to_string(&compact_task) ); } input_exist @@ -1228,7 +1297,7 @@ impl HummockManager { &mut hummock_version_deltas, &mut branched_ssts, ¤t_version, - compact_task, + &compact_task, deterministic_mode, ); let mut version_stats = VarTransaction::new(&mut versioning.version_stats); @@ -1277,8 +1346,6 @@ impl HummockManager { let label = if is_trivial_reclaim { "trivial-space-reclaim" } else if is_trivial_move { - // TODO: only support can_trivial_move in DynamicLevelCompcation, will check - // task_type next PR "trivial-move" } else { self.compactor_manager @@ -1298,7 +1365,7 @@ impl HummockManager { tracing::trace!( "Reported compaction task. {}. 
cost time: {:?}", - compact_task_to_string(compact_task), + compact_task_to_string(&compact_task), start_time.elapsed(), ); @@ -1312,7 +1379,8 @@ impl HummockManager { ); if !deterministic_mode - && matches!(compact_task.task_type(), compact_task::TaskType::Dynamic) + && (matches!(compact_task.task_type(), compact_task::TaskType::Dynamic) + || matches!(compact_task.task_type(), compact_task::TaskType::Emergency)) { // only try send Dynamic compaction self.try_send_compaction_request( @@ -1426,7 +1494,7 @@ impl HummockManager { .id_gen_manager() .generate_interval::<{ IdCategory::HummockSstableId }>(new_sst_id_number as u64) .await?; - let mut branched_ssts = BTreeMapTransaction::new(&mut versioning.branched_ssts); + let mut branched_ssts = BTreeMapTransaction::<'_, _, _>::new(&mut versioning.branched_ssts); let original_sstables = std::mem::take(&mut sstables); sstables.reserve_exact(original_sstables.len() - incorrect_ssts.len() + new_sst_id_number); let mut incorrect_ssts = incorrect_ssts.into_iter(); @@ -1477,6 +1545,7 @@ impl HummockManager { .into_iter() .map(|ExtendedSstableInfo { sst_info, .. }| sst_info) .collect_vec(); + let group_deltas = &mut new_version_delta .group_deltas .entry(compaction_group_id) @@ -1692,27 +1761,22 @@ impl HummockManager { } /// Get version deltas from meta store - #[cfg_attr(coverage, no_coverage)] + #[cfg_attr(coverage, coverage(off))] + #[named] pub async fn list_version_deltas( &self, start_id: u64, num_limit: u32, committed_epoch_limit: HummockEpoch, ) -> Result { - let ordered_version_deltas: BTreeMap<_, _> = - HummockVersionDelta::list(self.env.meta_store()) - .await? 
- .into_iter() - .map(|version_delta| (version_delta.id, version_delta)) - .collect(); - - let version_deltas = ordered_version_deltas - .into_iter() - .filter(|(id, delta)| { - *id >= start_id && delta.max_committed_epoch <= committed_epoch_limit - }) - .map(|(_, v)| v) + let versioning = read_lock!(self, versioning).await; + let version_deltas = versioning + .hummock_version_deltas + .range(start_id..) + .map(|(_id, delta)| delta) + .filter(|delta| delta.max_committed_epoch <= committed_epoch_limit) .take(num_limit as _) + .cloned() .collect(); Ok(HummockVersionDeltas { version_deltas }) } @@ -1909,10 +1973,12 @@ impl HummockManager { Ok(()) } + #[cfg(any(test, feature = "test"))] pub fn compactor_manager_ref_for_test(&self) -> CompactorManagerRef { self.compactor_manager.clone() } + #[cfg(any(test, feature = "test"))] #[named] pub async fn compaction_task_from_assignment_for_test( &self, @@ -1923,6 +1989,31 @@ impl HummockManager { assignment_ref.get(&task_id).cloned() } + #[cfg(any(test, feature = "test"))] + #[named] + pub async fn report_compact_task_for_test( + &self, + task_id: u64, + compact_task: Option, + task_status: TaskStatus, + sorted_output_ssts: Vec, + table_stats_change: Option, + ) -> Result { + let mut guard = write_lock!(self, compaction).await; + + // In the test, the contents of the compact task may have been modified directly, while the contents of compact_task_assignment were not modified. + // So we pass the modified compact_task directly into the `report_compact_task_impl` + self.report_compact_task_impl( + task_id, + compact_task, + task_status, + sorted_output_ssts, + &mut guard, + table_stats_change, + ) + .await + } + pub fn cluster_manager(&self) -> &ClusterManagerRef { &self.cluster_manager } @@ -2192,13 +2283,12 @@ impl HummockManager { // side, and meta is just used as a last resort to clean up the // tasks that compactor has expired. 
- // - for mut task in + for task in compactor_manager.get_expired_tasks(Some(INTERVAL_SEC)) { if let Err(e) = hummock_manager .cancel_compact_task( - &mut task, + task.task_id, TaskStatus::HeartbeatCanceled, ) .await @@ -2469,6 +2559,7 @@ impl HummockManager { } } + #[named] pub fn compaction_event_loop( hummock_manager: Arc, mut compactor_streams_change_rx: UnboundedReceiver<( @@ -2558,7 +2649,30 @@ impl HummockManager { assert_ne!(0, pull_task_count); if let Some(compactor) = hummock_manager.compactor_manager.get_compactor(context_id) { if let Some((group, task_type)) = hummock_manager.auto_pick_compaction_group_and_type().await { - let selector: &mut Box = compaction_selectors.get_mut(&task_type).unwrap(); + let selector: &mut Box = { + let versioning_guard = read_lock!(hummock_manager, versioning).await; + let versioning = versioning_guard.deref(); + + if versioning.write_limit.contains_key(&group) { + let enable_emergency_picker = match hummock_manager + .compaction_group_manager + .read() + .await + .try_get_compaction_group_config(group) + { + Some(config) =>{ config.compaction_config.enable_emergency_picker }, + None => { unreachable!("compaction-group {} not exist", group) } + }; + + if enable_emergency_picker { + compaction_selectors.get_mut(&TaskType::Emergency).unwrap() + } else { + compaction_selectors.get_mut(&task_type).unwrap() + } + } else { + compaction_selectors.get_mut(&task_type).unwrap() + } + }; for _ in 0..pull_task_count { let compact_task = hummock_manager @@ -2614,15 +2728,14 @@ impl HummockManager { }, RequestEvent::ReportTask(ReportTask { - compact_task, + task_id, + task_status, + sorted_output_ssts, table_stats_change }) => { - if let Some(mut compact_task) = compact_task { - if let Err(e) = hummock_manager - .report_compact_task(&mut compact_task, Some(table_stats_change)) + if let Err(e) = hummock_manager.report_compact_task(task_id, TaskStatus::try_from(task_status).unwrap(), sorted_output_ssts, Some(table_stats_change)) .await { 
tracing::error!("report compact_tack fail {e:?}"); - } } }, @@ -2633,7 +2746,7 @@ impl HummockManager { let cancel_tasks = compactor_manager.update_task_heartbeats(&progress); // TODO: task cancellation can be batched - for mut task in cancel_tasks { + for task in cancel_tasks { tracing::info!( "Task with task_id {} with context_id {} has expired due to lack of visible progress", context_id, @@ -2642,7 +2755,7 @@ impl HummockManager { if let Err(e) = hummock_manager - .cancel_compact_task(&mut task, TaskStatus::HeartbeatCanceled) + .cancel_compact_task(task.task_id, TaskStatus::HeartbeatCanceled) .await { tracing::error!("Attempt to remove compaction task due to elapsed heartbeat failed. We will continue to track its heartbeat @@ -2824,6 +2937,7 @@ fn gen_version_delta<'a>( group_deltas.push(group_delta); version_delta.gc_object_ids.append(&mut gc_object_ids); version_delta.safe_epoch = std::cmp::max(old_version.safe_epoch, compact_task.watermark); + // Don't persist version delta generated by compaction to meta store in deterministic mode. // Because it will override existing version delta that has same ID generated in the data // ingestion phase. 
@@ -2843,7 +2957,7 @@ async fn write_exclusive_cluster_id( const CLUSTER_ID_NAME: &str = "0"; let cluster_id_dir = format!("{}/{}/", state_store_dir, CLUSTER_ID_DIR); let cluster_id_full_path = format!("{}{}", cluster_id_dir, CLUSTER_ID_NAME); - match object_store.read(&cluster_id_full_path, None).await { + match object_store.read(&cluster_id_full_path, ..).await { Ok(cluster_id) => Err(ObjectError::internal(format!( "Data directory is already used by another cluster with id {:?}, path {}.", String::from_utf8(cluster_id.to_vec()).unwrap(), @@ -2862,8 +2976,8 @@ async fn write_exclusive_cluster_id( } } -fn init_selectors() -> HashMap> { - let mut compaction_selectors: HashMap> = +fn init_selectors() -> HashMap> { + let mut compaction_selectors: HashMap> = HashMap::default(); compaction_selectors.insert( compact_task::TaskType::Dynamic, @@ -2881,12 +2995,19 @@ fn init_selectors() -> HashMap> { compact_task::TaskType::Tombstone, Box::::default(), ); + compaction_selectors.insert( + compact_task::TaskType::Emergency, + Box::::default(), + ); compaction_selectors } type CompactionRequestChannelItem = (CompactionGroupId, compact_task::TaskType); use tokio::sync::mpsc::error::SendError; +use super::compaction::selector::EmergencySelector; +use super::compaction::CompactionSelector; + #[derive(Debug, Default)] pub struct CompactionState { scheduled: Mutex>, @@ -2928,6 +3049,8 @@ impl CompactionState { Some(compact_task::TaskType::SpaceReclaim) } else if guard.contains(&(group, compact_task::TaskType::Ttl)) { Some(compact_task::TaskType::Ttl) + } else if guard.contains(&(group, compact_task::TaskType::Tombstone)) { + Some(compact_task::TaskType::Tombstone) } else if guard.contains(&(group, compact_task::TaskType::Dynamic)) { Some(compact_task::TaskType::Dynamic) } else { diff --git a/src/meta/src/hummock/manager/tests.rs b/src/meta/src/hummock/manager/tests.rs index 596149df3b8aa..4d5de0cc19011 100644 --- a/src/meta/src/hummock/manager/tests.rs +++ 
b/src/meta/src/hummock/manager/tests.rs @@ -39,10 +39,11 @@ use risingwave_pb::hummock::{ use risingwave_pb::meta::add_worker_node_request::Property; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; -use crate::hummock::compaction::{ - default_level_selector, CompactStatus, LevelSelector, ManualCompactionOption, +use crate::hummock::compaction::selector::{ + default_compaction_selector, CompactionSelector, ManualCompactionOption, SpaceReclaimCompactionSelector, }; +use crate::hummock::compaction::CompactStatus; use crate::hummock::error::Error; use crate::hummock::test_utils::*; use crate::hummock::{HummockManager, HummockManagerRef}; @@ -161,7 +162,7 @@ async fn test_hummock_compaction_task() { assert!(hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -189,10 +190,10 @@ async fn test_hummock_compaction_task() { .unwrap(); // Get a compaction task. - let mut compact_task = hummock_manager + let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -209,25 +210,24 @@ async fn test_hummock_compaction_task() { // Cancel the task and succeed. assert!(hummock_manager - .cancel_compact_task(&mut compact_task, TaskStatus::ManualCanceled) + .cancel_compact_task(compact_task.task_id, TaskStatus::ManualCanceled) .await .unwrap()); // Get a compaction task. - let mut compact_task = hummock_manager + let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() .unwrap(); assert_eq!(compact_task.get_task_id(), 3); // Finish the task and succeed. 
- compact_task.set_task_status(TaskStatus::Success); assert!(hummock_manager - .report_compact_task(&mut compact_task, None) + .report_compact_task(compact_task.task_id, TaskStatus::Success, vec![], None) .await .unwrap()); } @@ -731,7 +731,7 @@ async fn test_print_compact_task() { let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -847,15 +847,6 @@ async fn test_trigger_manual_compaction() { assert!(result.is_ok()); } - let task_id: u64 = 4; - let compact_task = hummock_manager - .compaction_task_from_assignment_for_test(task_id) - .await - .unwrap() - .compact_task - .unwrap(); - assert_eq!(task_id, compact_task.task_id); - { let option = ManualCompactionOption::default(); // all sst pending , test no compaction avail @@ -887,7 +878,7 @@ async fn test_hummock_compaction_task_heartbeat() { assert!(hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -915,10 +906,10 @@ async fn test_hummock_compaction_task_heartbeat() { .unwrap(); // Get a compaction task. - let mut compact_task = hummock_manager + let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -946,17 +937,21 @@ async fn test_hummock_compaction_task_heartbeat() { } // Cancel the task immediately and succeed. - compact_task.set_task_status(TaskStatus::ExecuteFailed); assert!(hummock_manager - .report_compact_task(&mut compact_task, None) + .report_compact_task( + compact_task.task_id, + TaskStatus::ExecuteFailed, + vec![], + None + ) .await .unwrap()); // Get a compaction task. 
- let mut compact_task = hummock_manager + let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -965,14 +960,18 @@ async fn test_hummock_compaction_task_heartbeat() { assert_eq!(compact_task.get_task_id(), 3); // Cancel the task after heartbeat has triggered and fail. - compact_task.set_task_status(TaskStatus::ExecuteFailed); // do not send heartbeats to the task for 30s seconds (ttl = 1s, heartbeat check freq. = 1s) // default_interval = 30s tokio::time::sleep(std::time::Duration::from_secs(32)).await; assert!(!hummock_manager - .report_compact_task(&mut compact_task, None) + .report_compact_task( + compact_task.task_id, + TaskStatus::ExecuteFailed, + vec![], + None + ) .await .unwrap()); shutdown_tx.send(()).unwrap(); @@ -999,7 +998,7 @@ async fn test_hummock_compaction_task_heartbeat_removal_on_node_removal() { assert!(hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -1030,7 +1029,7 @@ async fn test_hummock_compaction_task_heartbeat_removal_on_node_removal() { let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -1193,15 +1192,15 @@ async fn test_version_stats() { .compactor_manager_ref_for_test() .add_compactor(worker_node.id); - let mut compact_task = hummock_manager + let compact_task = hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() .unwrap(); - compact_task.task_status = TaskStatus::Success as _; + // compact_task.task_status = TaskStatus::Success as _; let compact_table_stats_change = TableStatsMap::from([ ( 2, @@ -1222,7 +1221,9 @@ async 
fn test_version_stats() { ]); hummock_manager .report_compact_task( - &mut compact_task, + compact_task.task_id, + TaskStatus::Success, + vec![], Some(to_prost_table_stats_map(compact_table_stats_change)), ) .await @@ -1632,16 +1633,18 @@ async fn test_split_compaction_group_trivial_expired() { .register_table_ids(&[(102, 2)]) .await .unwrap(); - let mut task = hummock_manager - .get_compact_task(2, &mut default_level_selector()) + let task = hummock_manager + .get_compact_task(2, &mut default_compaction_selector()) .await .unwrap() .unwrap(); + hummock_manager .split_compaction_group(2, &[100]) .await .unwrap(); - let mut selector: Box = Box::::default(); + let mut selector: Box = + Box::::default(); let reclaim_task = hummock_manager .get_compact_task_impl(2, &mut selector) .await @@ -1666,30 +1669,32 @@ async fn test_split_compaction_group_trivial_expired() { vec![100] ); - let mut task2 = hummock_manager - .get_compact_task(new_group_id, &mut default_level_selector()) + let task2 = hummock_manager + .get_compact_task(new_group_id, &mut default_compaction_selector()) .await .unwrap() .unwrap(); - task2.sorted_output_ssts = vec![SstableInfo { - object_id: 12, - sst_id: 12, - key_range: None, - table_ids: vec![100], - min_epoch: 20, - max_epoch: 20, - ..Default::default() - }]; - // delete all reference of sst-10 - task2.task_status = TaskStatus::Success as i32; + let ret = hummock_manager - .report_compact_task(&mut task2, None) + .report_compact_task( + task2.task_id, + TaskStatus::Success, + vec![SstableInfo { + object_id: 12, + sst_id: 12, + key_range: None, + table_ids: vec![100], + min_epoch: 20, + max_epoch: 20, + ..Default::default() + }], + None, + ) .await .unwrap(); assert!(ret); - task.task_status = TaskStatus::Success as i32; let ret = hummock_manager - .report_compact_task(&mut task, None) + .report_compact_task(task.task_id, TaskStatus::Success, vec![], None) .await .unwrap(); // the task has been canceld @@ -1750,37 +1755,41 @@ async fn 
test_split_compaction_group_on_demand_bottom_levels() { .await .unwrap(); // Construct data via manual compaction - let mut compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; + let compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; let base_level: usize = 6; assert_eq!(compaction_task.input_ssts[0].table_infos.len(), 1); assert_eq!(compaction_task.target_level, base_level as u32); - compaction_task.sorted_output_ssts = vec![ - SstableInfo { - object_id: 11, - sst_id: 11, - table_ids: vec![100, 101], - key_range: Some(KeyRange { - left: iterator_test_key_of_epoch(1, 1, 1), - right: iterator_test_key_of_epoch(1, 1, 1), - right_exclusive: false, - }), - ..Default::default() - }, - SstableInfo { - object_id: 12, - sst_id: 12, - table_ids: vec![100], - key_range: Some(KeyRange { - left: iterator_test_key_of_epoch(1, 2, 2), - right: iterator_test_key_of_epoch(1, 2, 2), - right_exclusive: false, - }), - ..Default::default() - }, - ]; - compaction_task.task_status = TaskStatus::Success.into(); + assert!(hummock_manager - .report_compact_task(&mut compaction_task, None) + .report_compact_task( + compaction_task.task_id, + TaskStatus::Success, + vec![ + SstableInfo { + object_id: 11, + sst_id: 11, + table_ids: vec![100, 101], + key_range: Some(KeyRange { + left: iterator_test_key_of_epoch(1, 1, 1), + right: iterator_test_key_of_epoch(1, 1, 1), + right_exclusive: false, + }), + ..Default::default() + }, + SstableInfo { + object_id: 12, + sst_id: 12, + table_ids: vec![100], + key_range: Some(KeyRange { + left: iterator_test_key_of_epoch(1, 2, 2), + right: iterator_test_key_of_epoch(1, 2, 2), + right_exclusive: false, + }), + ..Default::default() + }, + ], + None + ) .await .unwrap()); let current_version = hummock_manager.get_current_version().await; @@ -1911,7 +1920,7 @@ async fn test_compaction_task_expiration_due_to_split_group() { .await .unwrap(); - let mut compaction_task = get_manual_compact_task(&hummock_manager, 
context_id).await; + let compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; assert_eq!(compaction_task.input_ssts[0].table_infos.len(), 2); hummock_manager .split_compaction_group(2, &[100]) @@ -1919,9 +1928,9 @@ async fn test_compaction_task_expiration_due_to_split_group() { .unwrap(); let version_1 = hummock_manager.get_current_version().await; - compaction_task.task_status = TaskStatus::Success.into(); + // compaction_task.task_status = TaskStatus::Success.into(); assert!(!hummock_manager - .report_compact_task(&mut compaction_task, None) + .report_compact_task(compaction_task.task_id, TaskStatus::Success, vec![], None) .await .unwrap()); let version_2 = hummock_manager.get_current_version().await; @@ -1930,11 +1939,10 @@ async fn test_compaction_task_expiration_due_to_split_group() { "version should not change because compaction task has been cancelled" ); - let mut compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; + let compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; assert_eq!(compaction_task.input_ssts[0].table_infos.len(), 2); - compaction_task.task_status = TaskStatus::Success.into(); hummock_manager - .report_compact_task(&mut compaction_task, None) + .report_compact_task(compaction_task.task_id, TaskStatus::Success, vec![], None) .await .unwrap(); @@ -1968,18 +1976,21 @@ async fn test_move_tables_between_compaction_group() { .await .unwrap(); // Construct data via manual compaction - let mut compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; + let compaction_task = get_manual_compact_task(&hummock_manager, context_id).await; let base_level: usize = 6; assert_eq!(compaction_task.input_ssts[0].table_infos.len(), 1); assert_eq!(compaction_task.target_level, base_level as u32); - compaction_task.sorted_output_ssts = vec![ - gen_sstable_info(11, 1, vec![100]), - gen_sstable_info(12, 2, vec![100, 101]), - gen_sstable_info(13, 3, vec![101, 102]), 
- ]; - compaction_task.task_status = TaskStatus::Success.into(); assert!(hummock_manager - .report_compact_task(&mut compaction_task, None) + .report_compact_task( + compaction_task.task_id, + TaskStatus::Success, + vec![ + gen_sstable_info(11, 1, vec![100]), + gen_sstable_info(12, 2, vec![100, 101]), + gen_sstable_info(13, 3, vec![101, 102]), + ], + None + ) .await .unwrap()); let sst_2 = gen_extend_sstable_info(14, 2, 1, vec![101, 102]); @@ -2021,9 +2032,10 @@ async fn test_move_tables_between_compaction_group() { let groups = info.keys().sorted().cloned().collect_vec(); assert_eq!(groups, vec![2, new_group_id]); - let mut selector: Box = Box::::default(); + let mut selector: Box = + Box::::default(); - let mut compaction_task = hummock_manager + let compaction_task = hummock_manager .get_compact_task(2, &mut selector) .await .unwrap() @@ -2031,11 +2043,14 @@ async fn test_move_tables_between_compaction_group() { assert_eq!(compaction_task.existing_table_ids, vec![101, 102]); assert_eq!(compaction_task.input_ssts[0].table_infos.len(), 1); assert_eq!(compaction_task.input_ssts[0].table_infos[0].object_id, 12); - compaction_task.sorted_output_ssts = vec![gen_sstable_info(20, 2, vec![101])]; - compaction_task.task_status = TaskStatus::Success.into(); let ret = hummock_manager - .report_compact_task(&mut compaction_task, None) + .report_compact_task( + compaction_task.task_id, + TaskStatus::Success, + vec![gen_sstable_info(20, 2, vec![101])], + None, + ) .await .unwrap(); assert!(ret); diff --git a/src/meta/src/hummock/manager/versioning.rs b/src/meta/src/hummock/manager/versioning.rs index 09a331fa3d9f5..e1ed8a5d716c2 100644 --- a/src/meta/src/hummock/manager/versioning.rs +++ b/src/meta/src/hummock/manager/versioning.rs @@ -23,6 +23,7 @@ use risingwave_hummock_sdk::compaction_group::hummock_version_ext::{ HummockVersionExt, }; use risingwave_hummock_sdk::compaction_group::{StateTableId, StaticCompactionGroupId}; +use 
risingwave_hummock_sdk::table_stats::add_prost_table_stats_map; use risingwave_hummock_sdk::{ CompactionGroupId, HummockContextId, HummockSstableObjectId, HummockVersionId, FIRST_VERSION_ID, }; @@ -30,15 +31,18 @@ use risingwave_pb::common::WorkerNode; use risingwave_pb::hummock::write_limits::WriteLimit; use risingwave_pb::hummock::{ CompactionConfig, HummockPinnedSnapshot, HummockPinnedVersion, HummockVersion, - HummockVersionCheckpoint, HummockVersionDelta, HummockVersionStats, + HummockVersionCheckpoint, HummockVersionDelta, HummockVersionStats, SstableInfo, TableStats, }; use risingwave_pb::meta::subscribe_response::{Info, Operation}; +use crate::hummock::error::Result; use crate::hummock::manager::worker::{HummockManagerEvent, HummockManagerEventSender}; -use crate::hummock::manager::{read_lock, write_lock}; +use crate::hummock::manager::{commit_multi_var, read_lock, write_lock}; use crate::hummock::metrics_utils::{trigger_safepoint_stat, trigger_write_stop_stats}; use crate::hummock::model::CompactionGroup; use crate::hummock::HummockManager; +use crate::model::{ValTransaction, VarTransaction}; +use crate::storage::Transaction; /// `HummockVersionSafePoint` prevents hummock versions GE than it from being GC. /// It's used by meta node itself to temporarily pin versions. 
@@ -271,6 +275,22 @@ impl HummockManager { let guard = read_lock!(self, versioning).await; guard.write_limit.clone() } + + #[named] + pub async fn list_branched_objects(&self) -> BTreeMap { + let guard = read_lock!(self, versioning).await; + guard.branched_ssts.clone() + } + + #[named] + pub async fn rebuild_table_stats(&self) -> Result<()> { + let mut versioning = write_lock!(self, versioning).await; + let new_stats = rebuild_table_stats(&versioning.current_version); + let mut version_stats = VarTransaction::new(&mut versioning.version_stats); + *version_stats = new_stats; + commit_multi_var!(self, None, Transaction::default(), version_stats)?; + Ok(()) + } } /// Calculates write limits for `target_groups`. @@ -332,6 +352,47 @@ pub(super) fn create_init_version(default_compaction_config: CompactionConfig) - init_version } +/// Rebuilds table stats from the given version. +/// Note that the result is approximate value. See `estimate_table_stats`. +fn rebuild_table_stats(version: &HummockVersion) -> HummockVersionStats { + let mut stats = HummockVersionStats { + hummock_version_id: version.id, + table_stats: Default::default(), + }; + for level in version.get_combined_levels() { + for sst in &level.table_infos { + let changes = estimate_table_stats(sst); + add_prost_table_stats_map(&mut stats.table_stats, &changes); + } + } + stats +} + +/// Estimates table stats change from the given file. +/// - The file stats is evenly distributed among multiple tables within the file. +/// - The total key size and total value size are estimated based on key range and file size. +/// - Branched files may lead to an overestimation. 
+fn estimate_table_stats(sst: &SstableInfo) -> HashMap { + let mut changes: HashMap = HashMap::default(); + let weighted_value = + |value: i64| -> i64 { (value as f64 / sst.table_ids.len() as f64).ceil() as i64 }; + let key_range = sst.key_range.as_ref().unwrap(); + let estimated_key_size: u64 = (key_range.left.len() + key_range.right.len()) as u64 / 2; + let mut estimated_total_key_size = estimated_key_size * sst.total_key_count; + if estimated_total_key_size > sst.uncompressed_file_size { + estimated_total_key_size = sst.uncompressed_file_size / 2; + tracing::warn!(sst.sst_id, "Calculated estimated_total_key_size {} > uncompressed_file_size {}. Use uncompressed_file_size/2 as estimated_total_key_size instead.", estimated_total_key_size, sst.uncompressed_file_size); + } + let estimated_total_value_size = sst.uncompressed_file_size - estimated_total_key_size; + for table_id in &sst.table_ids { + let e = changes.entry(*table_id).or_default(); + e.total_key_count += weighted_value(sst.total_key_count as i64); + e.total_key_size += weighted_value(estimated_total_key_size as i64); + e.total_value_size += weighted_value(estimated_total_value_size as i64); + } + changes +} + #[cfg(test)] mod tests { use std::collections::HashMap; @@ -340,10 +401,15 @@ mod tests { use risingwave_hummock_sdk::{CompactionGroupId, HummockVersionId}; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::write_limits::WriteLimit; - use risingwave_pb::hummock::{HummockPinnedVersion, HummockVersion, Level, OverlappingLevel}; + use risingwave_pb::hummock::{ + HummockPinnedVersion, HummockVersion, HummockVersionStats, KeyRange, Level, + OverlappingLevel, SstableInfo, + }; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; - use crate::hummock::manager::versioning::{calc_new_write_limits, Versioning}; + use crate::hummock::manager::versioning::{ + calc_new_write_limits, estimate_table_stats, rebuild_table_stats, Versioning, + }; use 
crate::hummock::model::CompactionGroup; #[test] @@ -464,4 +530,92 @@ mod tests { "too many L0 sub levels: 11 > 5" ); } + + #[test] + fn test_estimate_table_stats() { + let sst = SstableInfo { + key_range: Some(KeyRange { + left: vec![1; 10], + right: vec![1; 20], + ..Default::default() + }), + table_ids: vec![1, 2, 3], + total_key_count: 6000, + uncompressed_file_size: 6_000_000, + ..Default::default() + }; + let changes = estimate_table_stats(&sst); + assert_eq!(changes.len(), 3); + for stats in changes.values() { + assert_eq!(stats.total_key_count, 6000 / 3); + assert_eq!(stats.total_key_size, (10 + 20) / 2 * 6000 / 3); + assert_eq!( + stats.total_value_size, + (6_000_000 - (10 + 20) / 2 * 6000) / 3 + ); + } + + let mut version = HummockVersion { + id: 123, + levels: Default::default(), + max_committed_epoch: 0, + safe_epoch: 0, + }; + for cg in 1..3 { + version.levels.insert( + cg, + Levels { + levels: vec![Level { + table_infos: vec![sst.clone()], + ..Default::default() + }], + l0: Some(Default::default()), + ..Default::default() + }, + ); + } + let HummockVersionStats { + hummock_version_id, + table_stats, + } = rebuild_table_stats(&version); + assert_eq!(hummock_version_id, version.id); + assert_eq!(table_stats.len(), 3); + for (tid, stats) in table_stats { + assert_eq!( + stats.total_key_count, + changes.get(&tid).unwrap().total_key_count * 2 + ); + assert_eq!( + stats.total_key_size, + changes.get(&tid).unwrap().total_key_size * 2 + ); + assert_eq!( + stats.total_value_size, + changes.get(&tid).unwrap().total_value_size * 2 + ); + } + } + + #[test] + fn test_estimate_table_stats_large_key_range() { + let sst = SstableInfo { + key_range: Some(KeyRange { + left: vec![1; 1000], + right: vec![1; 2000], + ..Default::default() + }), + table_ids: vec![1, 2, 3], + total_key_count: 6000, + uncompressed_file_size: 60_000, + ..Default::default() + }; + let changes = estimate_table_stats(&sst); + assert_eq!(changes.len(), 3); + for t in &sst.table_ids { + let stats = 
changes.get(t).unwrap(); + assert_eq!(stats.total_key_count, 6000 / 3); + assert_eq!(stats.total_key_size, 60_000 / 2 / 3); + assert_eq!(stats.total_value_size, (60_000 - 60_000 / 2) / 3); + } + } } diff --git a/src/meta/src/hummock/manager/worker.rs b/src/meta/src/hummock/manager/worker.rs index 8a43ddc87247b..bc2103635b59f 100644 --- a/src/meta/src/hummock/manager/worker.rs +++ b/src/meta/src/hummock/manager/worker.rs @@ -34,7 +34,7 @@ pub enum HummockManagerEvent { } impl HummockManager { - pub(crate) async fn start_worker( + pub async fn start_worker( self: &HummockManagerRef, mut receiver: HummockManagerEventReceiver, ) -> JoinHandle<()> { @@ -98,58 +98,29 @@ impl HummockManager { let retry_strategy = ExponentialBackoff::from_millis(10) .max_delay(Duration::from_secs(60)) .map(jitter); - match notification { - LocalNotification::WorkerNodeDeleted(worker_node) => { - if worker_node.get_type().unwrap() == WorkerType::Compactor { - self.compactor_manager.remove_compactor(worker_node.id); - } - tokio_retry::RetryIf::spawn( - retry_strategy.clone(), - || async { - if let Err(err) = self.release_contexts(vec![worker_node.id]).await { - tracing::warn!( - "Failed to release hummock context {}. {}. 
Will retry.", - worker_node.id, - err - ); - return Err(err); - } - Ok(()) - }, - RetryableError::default(), - ) - .await - .expect("retry until success"); - tracing::info!("Released hummock context {}", worker_node.id); - sync_point!("AFTER_RELEASE_HUMMOCK_CONTEXTS_ASYNC"); + if let LocalNotification::WorkerNodeDeleted(worker_node) = notification { + if worker_node.get_type().unwrap() == WorkerType::Compactor { + self.compactor_manager.remove_compactor(worker_node.id); } - // TODO move `CompactionTaskNeedCancel` to `handle_hummock_manager_event` - // TODO extract retry boilerplate code - LocalNotification::CompactionTaskNeedCancel(compact_task) => { - let task_id = compact_task.task_id; - tokio_retry::RetryIf::spawn( - retry_strategy.clone(), - || async { - let mut compact_task_mut = compact_task.clone(); - if let Err(err) = self.cancel_compact_task_impl(&mut compact_task_mut).await - { - tracing::warn!( - "Failed to cancel compaction task {}. {}. Will retry.", - compact_task.task_id, - err - ); - return Err(err); - } - Ok(()) - }, - RetryableError::default(), - ) - .await - .expect("retry until success"); - tracing::info!("Cancelled compaction task {}", task_id); - sync_point!("AFTER_CANCEL_COMPACTION_TASK_ASYNC"); - } - _ => {} + tokio_retry::RetryIf::spawn( + retry_strategy.clone(), + || async { + if let Err(err) = self.release_contexts(vec![worker_node.id]).await { + tracing::warn!( + "Failed to release hummock context {}. {}. 
Will retry.", + worker_node.id, + err + ); + return Err(err); + } + Ok(()) + }, + RetryableError::default(), + ) + .await + .expect("retry until success"); + tracing::info!("Released hummock context {}", worker_node.id); + sync_point!("AFTER_RELEASE_HUMMOCK_CONTEXTS_ASYNC"); } } } diff --git a/src/meta/src/hummock/metrics_utils.rs b/src/meta/src/hummock/metrics_utils.rs index 8bcc1f1d2c1c6..6818b7f68570e 100644 --- a/src/meta/src/hummock/metrics_utils.rs +++ b/src/meta/src/hummock/metrics_utils.rs @@ -32,7 +32,8 @@ use risingwave_pb::hummock::{ HummockVersionCheckpoint, HummockVersionStats, LevelType, }; -use super::compaction::{get_compression_algorithm, DynamicLevelSelectorCore}; +use super::compaction::get_compression_algorithm; +use super::compaction::selector::DynamicLevelSelectorCore; use crate::hummock::compaction::CompactStatus; use crate::rpc::metrics::MetaMetrics; @@ -483,15 +484,15 @@ pub fn trigger_split_stat( .with_label_values(&[&group_label]) .set(member_table_id_len as _); - let branched_sst_count = branched_ssts + let branched_sst_count: usize = branched_ssts .values() - .map(|branched_map| branched_map.iter()) - .flat_map(|branched_map| { + .map(|branched_map| { branched_map - .filter(|(group_id, _sst_id)| **group_id == compaction_group_id) - .map(|(_, v)| v) + .keys() + .filter(|group_id| **group_id == compaction_group_id) + .count() }) - .sum::(); + .sum(); metrics .branched_sst_count diff --git a/src/meta/src/hummock/mock_hummock_meta_client.rs b/src/meta/src/hummock/mock_hummock_meta_client.rs index 915beee8e3a3f..678c701ca2891 100644 --- a/src/meta/src/hummock/mock_hummock_meta_client.rs +++ b/src/meta/src/hummock/mock_hummock_meta_client.rs @@ -27,6 +27,8 @@ use risingwave_hummock_sdk::{ SstObjectIdRange, }; use risingwave_pb::common::{HostAddress, WorkerType}; +use risingwave_pb::hummock::compact_task::TaskStatus; +use risingwave_pb::hummock::subscribe_compaction_event_request::{Event, ReportTask}; use 
risingwave_pb::hummock::subscribe_compaction_event_response::Event as ResponseEvent; use risingwave_pb::hummock::{ compact_task, CompactTask, HummockSnapshot, HummockVersion, SubscribeCompactionEventRequest, @@ -38,8 +40,8 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; use tokio::task::JoinHandle; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::hummock::compaction::{ - default_level_selector, LevelSelector, SpaceReclaimCompactionSelector, +use crate::hummock::compaction::selector::{ + default_compaction_selector, CompactionSelector, SpaceReclaimCompactionSelector, }; use crate::hummock::HummockManager; @@ -81,7 +83,7 @@ impl MockHummockMetaClient { self.hummock_manager .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap_or(None) @@ -224,7 +226,7 @@ impl HummockMetaClient for MockHummockMetaClient { .compactor_manager_ref_for_test() .add_compactor(context_id); - let (request_sender, _request_receiver) = + let (request_sender, mut request_receiver) = unbounded_channel::(); self.compact_context_id.store(context_id, Ordering::Release); @@ -232,6 +234,8 @@ impl HummockMetaClient for MockHummockMetaClient { let (task_tx, task_rx) = tokio::sync::mpsc::unbounded_channel(); let hummock_manager_compact = self.hummock_manager.clone(); + let mut join_handle_vec = vec![]; + let handle = tokio::spawn(async move { loop { let group_and_type = hummock_manager_compact @@ -244,8 +248,8 @@ impl HummockMetaClient for MockHummockMetaClient { let (group, task_type) = group_and_type.unwrap(); - let mut selector: Box = match task_type { - compact_task::TaskType::Dynamic => default_level_selector(), + let mut selector: Box = match task_type { + compact_task::TaskType::Dynamic => default_compaction_selector(), compact_task::TaskType::SpaceReclaim => { Box::::default() } @@ -270,11 +274,44 @@ impl HummockMetaClient for MockHummockMetaClient { } }); + 
join_handle_vec.push(handle); + + let hummock_manager_compact = self.hummock_manager.clone(); + let report_handle = tokio::spawn(async move { + tracing::info!("report_handle start"); + + loop { + if let Some(item) = request_receiver.recv().await { + if let Event::ReportTask(ReportTask { + task_id, + task_status, + sorted_output_ssts, + table_stats_change, + }) = item.event.unwrap() + { + if let Err(e) = hummock_manager_compact + .report_compact_task( + task_id, + TaskStatus::try_from(task_status).unwrap(), + sorted_output_ssts, + Some(table_stats_change), + ) + .await + { + tracing::error!("report compact_tack fail {e:?}"); + } + } + } + } + }); + + join_handle_vec.push(report_handle); + Ok(( request_sender, Box::pin(CompactionEventItemStream { inner: UnboundedReceiverStream::new(task_rx), - _handle: handle, + _handle: join_handle_vec, }), )) } @@ -288,7 +325,7 @@ impl MockHummockMetaClient { pub struct CompactionEventItemStream { inner: UnboundedReceiverStream, - _handle: JoinHandle<()>, + _handle: Vec>, } impl Drop for CompactionEventItemStream { diff --git a/src/meta/src/hummock/model/compaction_group_config.rs b/src/meta/src/hummock/model/compaction_group_config.rs index 8331abac62017..fa1bd1f88b3bd 100644 --- a/src/meta/src/hummock/model/compaction_group_config.rs +++ b/src/meta/src/hummock/model/compaction_group_config.rs @@ -23,8 +23,8 @@ use crate::model::{MetadataModel, MetadataModelResult}; #[derive(Debug, Clone, PartialEq)] pub struct CompactionGroup { - pub(crate) group_id: CompactionGroupId, - pub(crate) compaction_config: Arc, + pub group_id: CompactionGroupId, + pub compaction_config: Arc, } impl CompactionGroup { diff --git a/src/meta/src/hummock/model/mod.rs b/src/meta/src/hummock/model/mod.rs index a2e5d1748f351..66c12d90836b9 100644 --- a/src/meta/src/hummock/model/mod.rs +++ b/src/meta/src/hummock/model/mod.rs @@ -17,7 +17,6 @@ mod compaction_group_config; mod compaction_status; mod pinned_snapshot; mod pinned_version; -mod version; mod 
version_delta; mod version_stats; @@ -25,12 +24,10 @@ pub use compaction_group_config::CompactionGroup; pub use compaction_status::*; pub use pinned_snapshot::*; pub use pinned_version::*; -pub use version::*; pub use version_delta::*; /// Column family names for hummock. /// Deprecated `cf_name` should be reserved for backward compatibility. -const HUMMOCK_VERSION_CF_NAME: &str = "cf/hummock_0"; const HUMMOCK_VERSION_DELTA_CF_NAME: &str = "cf/hummock_1"; const HUMMOCK_PINNED_VERSION_CF_NAME: &str = "cf/hummock_2"; const HUMMOCK_PINNED_SNAPSHOT_CF_NAME: &str = "cf/hummock_3"; diff --git a/src/meta/src/hummock/model/version.rs b/src/meta/src/hummock/model/version.rs deleted file mode 100644 index d6a85ae745c64..0000000000000 --- a/src/meta/src/hummock/model/version.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use prost::Message; -use risingwave_hummock_sdk::HummockVersionId; -use risingwave_pb::hummock::HummockVersion; - -use crate::hummock::model::HUMMOCK_VERSION_CF_NAME; -use crate::model::{MetadataModel, MetadataModelResult}; - -/// `HummockVersion` tracks `Sstables` in given version. 
-impl MetadataModel for HummockVersion { - type KeyType = HummockVersionId; - type PbType = HummockVersion; - - fn cf_name() -> String { - String::from(HUMMOCK_VERSION_CF_NAME) - } - - fn to_protobuf(&self) -> Self::PbType { - self.clone() - } - - fn to_protobuf_encoded_vec(&self) -> Vec { - self.encode_to_vec() - } - - fn from_protobuf(prost: Self::PbType) -> Self { - prost - } - - fn key(&self) -> MetadataModelResult { - Ok(0) - } -} diff --git a/src/meta/src/hummock/test_utils.rs b/src/meta/src/hummock/test_utils.rs index 632d56ca2c400..3d42442ae7c67 100644 --- a/src/meta/src/hummock/test_utils.rs +++ b/src/meta/src/hummock/test_utils.rs @@ -31,7 +31,7 @@ use risingwave_pb::meta::add_worker_node_request::Property; use crate::hummock::compaction::compaction_config::CompactionConfigBuilder; #[cfg(test)] -use crate::hummock::compaction::default_level_selector; +use crate::hummock::compaction::selector::default_compaction_selector; use crate::hummock::{CompactorManager, HummockManager, HummockManagerRef}; use crate::manager::{ ClusterManager, ClusterManagerRef, FragmentManager, MetaSrvEnv, META_NODE_ID, @@ -92,7 +92,7 @@ pub async fn add_test_tables( StaticCompactionGroupId::StateDefault.into(), ) .await; - let mut selector = default_level_selector(); + let mut selector = default_compaction_selector(); let mut compact_task = hummock_manager .get_compact_task(StaticCompactionGroupId::StateDefault.into(), &mut selector) .await @@ -114,10 +114,15 @@ pub async fn add_test_tables( .unwrap(); assert_eq!(compactor.context_id(), context_id); } - compact_task.sorted_output_ssts = test_tables_2.clone(); - compact_task.set_task_status(TaskStatus::Success); + let ret = hummock_manager - .report_compact_task(&mut compact_task, None) + .report_compact_task_for_test( + compact_task.task_id, + Some(compact_task), + TaskStatus::Success, + test_tables_2.clone(), + None, + ) .await .unwrap(); assert!(ret); diff --git a/src/meta/src/hummock/vacuum.rs b/src/meta/src/hummock/vacuum.rs 
index 31f4651d6fdfd..992deb5e636ce 100644 --- a/src/meta/src/hummock/vacuum.rs +++ b/src/meta/src/hummock/vacuum.rs @@ -164,8 +164,7 @@ impl VacuumManager { &self, objects_to_delete: &mut Vec, ) -> MetaResult<()> { - let reject: HashSet = - self.backup_manager.list_pinned_ssts().into_iter().collect(); + let reject = self.backup_manager.list_pinned_ssts(); // Ack these SSTs immediately, because they tend to be pinned for long time. // They will be GCed during full GC when they are no longer pinned. let to_ack = objects_to_delete diff --git a/src/meta/src/lib.rs b/src/meta/src/lib.rs index 92d3c571f57c5..f549578f079c6 100644 --- a/src/meta/src/lib.rs +++ b/src/meta/src/lib.rs @@ -14,12 +14,10 @@ #![allow(clippy::derive_partial_eq_without_eq)] #![feature(trait_alias)] -#![feature(binary_heap_drain_sorted)] #![feature(type_alias_impl_trait)] -#![feature(extract_if)] -#![feature(custom_test_frameworks)] #![feature(lint_reasons)] #![feature(map_try_insert)] +#![feature(extract_if)] #![feature(hash_extract_if)] #![feature(btree_extract_if)] #![feature(result_option_inspect)] @@ -28,323 +26,39 @@ #![feature(error_generic_member_access)] #![feature(assert_matches)] #![feature(try_blocks)] -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] +#![feature(custom_test_frameworks)] #![test_runner(risingwave_test_runner::test_runner::run_failpont_tests)] #![feature(is_sorted)] #![feature(impl_trait_in_assoc_type)] #![feature(type_name_of_val)] pub mod backup_restore; -mod barrier; +pub mod barrier; +pub mod controller; #[cfg(not(madsim))] // no need in simulation test -mod dashboard; -mod error; +pub mod dashboard; +pub mod error; pub mod hummock; pub mod manager; pub mod model; -mod rpc; -pub(crate) mod serving; +pub mod model_v2; +pub mod rpc; +pub mod serving; pub mod storage; -mod stream; -pub(crate) mod telemetry; -use std::time::Duration; +pub mod stream; +pub mod telemetry; -use clap::Parser; pub use error::{MetaError, 
MetaResult}; -use risingwave_common::config::OverrideConfig; -use risingwave_common::{GIT_SHA, RW_VERSION}; pub use rpc::{ElectionClient, ElectionMember, EtcdElectionClient}; use crate::manager::MetaOpts; -use crate::rpc::server::{rpc_serve, AddressInfo, MetaStoreBackend}; - -#[derive(Debug, Clone, Parser, OverrideConfig)] -#[command(version, about = "The central metadata management service")] -pub struct MetaNodeOpts { - #[clap(long, env = "RW_VPC_ID")] - vpc_id: Option, - - #[clap(long, env = "RW_VPC_SECURITY_GROUP_ID")] - security_group_id: Option, - - #[clap(long, env = "RW_LISTEN_ADDR", default_value = "127.0.0.1:5690")] - listen_addr: String, - - /// The address for contacting this instance of the service. - /// This would be synonymous with the service's "public address" - /// or "identifying address". - /// It will serve as a unique identifier in cluster - /// membership and leader election. Must be specified for etcd backend. - #[clap(long, env = "RW_ADVERTISE_ADDR")] - advertise_addr: String, - - #[clap(long, env = "RW_DASHBOARD_HOST")] - dashboard_host: Option, - - #[clap(long, env = "RW_PROMETHEUS_HOST")] - prometheus_host: Option, - - #[clap(long, env = "RW_ETCD_ENDPOINTS", default_value_t = String::from(""))] - etcd_endpoints: String, - - /// Enable authentication with etcd. By default disabled. - #[clap(long, env = "RW_ETCD_AUTH")] - etcd_auth: bool, - - /// Username of etcd, required when --etcd-auth is enabled. - #[clap(long, env = "RW_ETCD_USERNAME", default_value = "")] - etcd_username: String, - - /// Password of etcd, required when --etcd-auth is enabled. - #[clap(long, env = "RW_ETCD_PASSWORD", default_value = "")] - etcd_password: String, - - #[clap(long, env = "RW_DASHBOARD_UI_PATH")] - dashboard_ui_path: Option, - - /// For dashboard service to fetch cluster info. 
- #[clap(long, env = "RW_PROMETHEUS_ENDPOINT")] - prometheus_endpoint: Option, - - /// Endpoint of the connector node, there will be a sidecar connector node - /// colocated with Meta node in the cloud environment - #[clap(long, env = "RW_CONNECTOR_RPC_ENDPOINT")] - pub connector_rpc_endpoint: Option, - - /// Default tag for the endpoint created when creating a privatelink connection. - /// Will be appended to the tags specified in the `tags` field in with clause in `create - /// connection`. - #[clap(long, env = "RW_PRIVATELINK_ENDPOINT_DEFAULT_TAGS")] - pub privatelink_endpoint_default_tags: Option, - - /// The path of `risingwave.toml` configuration file. - /// - /// If empty, default configuration values will be used. - #[clap(long, env = "RW_CONFIG_PATH", default_value = "")] - pub config_path: String, - - #[clap(long, env = "RW_BACKEND", value_enum)] - #[override_opts(path = meta.backend)] - backend: Option, - - /// The interval of periodic barrier. - #[clap(long, env = "RW_BARRIER_INTERVAL_MS")] - #[override_opts(path = system.barrier_interval_ms)] - barrier_interval_ms: Option, - - /// Target size of the Sstable. - #[clap(long, env = "RW_SSTABLE_SIZE_MB")] - #[override_opts(path = system.sstable_size_mb)] - sstable_size_mb: Option, - - /// Size of each block in bytes in SST. - #[clap(long, env = "RW_BLOCK_SIZE_KB")] - #[override_opts(path = system.block_size_kb)] - block_size_kb: Option, - - /// False positive probability of bloom filter. - #[clap(long, env = "RW_BLOOM_FALSE_POSITIVE")] - #[override_opts(path = system.bloom_false_positive)] - bloom_false_positive: Option, - - /// State store url - #[clap(long, env = "RW_STATE_STORE")] - #[override_opts(path = system.state_store)] - state_store: Option, - - /// Remote directory for storing data and metadata objects. - #[clap(long, env = "RW_DATA_DIRECTORY")] - #[override_opts(path = system.data_directory)] - data_directory: Option, - - /// Whether config object storage bucket lifecycle to purge stale data. 
- #[clap(long, env = "RW_DO_NOT_CONFIG_BUCKET_LIFECYCLE")] - #[override_opts(path = meta.do_not_config_object_storage_lifecycle)] - do_not_config_object_storage_lifecycle: Option, - - /// Remote storage url for storing snapshots. - #[clap(long, env = "RW_BACKUP_STORAGE_URL")] - #[override_opts(path = system.backup_storage_url)] - backup_storage_url: Option, - - /// Remote directory for storing snapshots. - #[clap(long, env = "RW_BACKUP_STORAGE_DIRECTORY")] - #[override_opts(path = system.backup_storage_directory)] - backup_storage_directory: Option, - - #[clap(long, env = "RW_OBJECT_STORE_STREAMING_READ_TIMEOUT_MS", value_enum)] - #[override_opts(path = storage.object_store_streaming_read_timeout_ms)] - pub object_store_streaming_read_timeout_ms: Option, - #[clap(long, env = "RW_OBJECT_STORE_STREAMING_UPLOAD_TIMEOUT_MS", value_enum)] - #[override_opts(path = storage.object_store_streaming_upload_timeout_ms)] - pub object_store_streaming_upload_timeout_ms: Option, - #[clap(long, env = "RW_OBJECT_STORE_UPLOAD_TIMEOUT_MS", value_enum)] - #[override_opts(path = storage.object_store_upload_timeout_ms)] - pub object_store_upload_timeout_ms: Option, - #[clap(long, env = "RW_OBJECT_STORE_READ_TIMEOUT_MS", value_enum)] - #[override_opts(path = storage.object_store_read_timeout_ms)] - pub object_store_read_timeout_ms: Option, -} - -use std::future::Future; -use std::pin::Pin; - -use risingwave_common::config::{load_config, MetaBackend, RwConfig}; -use tracing::info; - -/// Start meta node -pub fn start(opts: MetaNodeOpts) -> Pin + Send>> { - // WARNING: don't change the function signature. Making it `async fn` will cause - // slow compile in release mode. 
- Box::pin(async move { - info!("Starting meta node"); - info!("> options: {:?}", opts); - let config = load_config(&opts.config_path, &opts); - info!("> config: {:?}", config); - info!("> version: {} ({})", RW_VERSION, GIT_SHA); - let listen_addr = opts.listen_addr.parse().unwrap(); - let dashboard_addr = opts.dashboard_host.map(|x| x.parse().unwrap()); - let prometheus_addr = opts.prometheus_host.map(|x| x.parse().unwrap()); - let backend = match config.meta.backend { - MetaBackend::Etcd => MetaStoreBackend::Etcd { - endpoints: opts - .etcd_endpoints - .split(',') - .map(|x| x.to_string()) - .collect(), - credentials: match opts.etcd_auth { - true => Some((opts.etcd_username, opts.etcd_password)), - false => None, - }, - }, - MetaBackend::Mem => MetaStoreBackend::Mem, - }; - - validate_config(&config); - - let max_heartbeat_interval = - Duration::from_secs(config.meta.max_heartbeat_interval_secs as u64); - let max_idle_ms = config.meta.dangerous_max_idle_secs.unwrap_or(0) * 1000; - let in_flight_barrier_nums = config.streaming.in_flight_barrier_nums; - let privatelink_endpoint_default_tags = - opts.privatelink_endpoint_default_tags.map(|tags| { - tags.split(',') - .map(|s| { - let key_val = s.split_once('=').unwrap(); - (key_val.0.to_string(), key_val.1.to_string()) - }) - .collect() - }); - - info!("Meta server listening at {}", listen_addr); - let add_info = AddressInfo { - advertise_addr: opts.advertise_addr, - listen_addr, - prometheus_addr, - dashboard_addr, - ui_path: opts.dashboard_ui_path, - }; - - let (mut join_handle, leader_lost_handle, shutdown_send) = rpc_serve( - add_info, - backend, - max_heartbeat_interval, - config.meta.meta_leader_lease_secs, - MetaOpts { - enable_recovery: !config.meta.disable_recovery, - in_flight_barrier_nums, - max_idle_ms, - compaction_deterministic_test: config.meta.enable_compaction_deterministic, - default_parallelism: config.meta.default_parallelism, - vacuum_interval_sec: config.meta.vacuum_interval_sec, - 
vacuum_spin_interval_ms: config.meta.vacuum_spin_interval_ms, - hummock_version_checkpoint_interval_sec: config - .meta - .hummock_version_checkpoint_interval_sec, - min_delta_log_num_for_hummock_version_checkpoint: config - .meta - .min_delta_log_num_for_hummock_version_checkpoint, - min_sst_retention_time_sec: config.meta.min_sst_retention_time_sec, - full_gc_interval_sec: config.meta.full_gc_interval_sec, - collect_gc_watermark_spin_interval_sec: config - .meta - .collect_gc_watermark_spin_interval_sec, - enable_committed_sst_sanity_check: config.meta.enable_committed_sst_sanity_check, - periodic_compaction_interval_sec: config.meta.periodic_compaction_interval_sec, - node_num_monitor_interval_sec: config.meta.node_num_monitor_interval_sec, - prometheus_endpoint: opts.prometheus_endpoint, - vpc_id: opts.vpc_id, - security_group_id: opts.security_group_id, - connector_rpc_endpoint: opts.connector_rpc_endpoint, - privatelink_endpoint_default_tags, - periodic_space_reclaim_compaction_interval_sec: config - .meta - .periodic_space_reclaim_compaction_interval_sec, - telemetry_enabled: config.server.telemetry_enabled, - periodic_ttl_reclaim_compaction_interval_sec: config - .meta - .periodic_ttl_reclaim_compaction_interval_sec, - periodic_tombstone_reclaim_compaction_interval_sec: config - .meta - .periodic_tombstone_reclaim_compaction_interval_sec, - periodic_split_compact_group_interval_sec: config - .meta - .periodic_split_compact_group_interval_sec, - split_group_size_limit: config.meta.split_group_size_limit, - min_table_split_size: config.meta.move_table_size_limit, - table_write_throughput_threshold: config.meta.table_write_throughput_threshold, - min_table_split_write_throughput: config.meta.min_table_split_write_throughput, - partition_vnode_count: config.meta.partition_vnode_count, - do_not_config_object_storage_lifecycle: config - .meta - .do_not_config_object_storage_lifecycle, - compaction_task_max_heartbeat_interval_secs: config - .meta - 
.compaction_task_max_heartbeat_interval_secs, - compaction_config: Some(config.meta.compaction_config), - }, - config.system.into_init_system_params(), - ) - .await - .unwrap(); - - match leader_lost_handle { - None => { - tokio::select! { - _ = tokio::signal::ctrl_c() => { - tracing::info!("receive ctrl+c"); - shutdown_send.send(()).unwrap(); - join_handle.await.unwrap() - } - res = &mut join_handle => res.unwrap(), - }; - } - Some(mut handle) => { - tokio::select! { - _ = &mut handle => { - tracing::info!("receive leader lost signal"); - // When we lose leadership, we will exit as soon as possible. - } - _ = tokio::signal::ctrl_c() => { - tracing::info!("receive ctrl+c"); - shutdown_send.send(()).unwrap(); - join_handle.await.unwrap(); - handle.abort(); - } - res = &mut join_handle => { - res.unwrap(); - handle.abort(); - }, - }; - } - }; - }) -} -fn validate_config(config: &RwConfig) { - if config.meta.meta_leader_lease_secs <= 2 { - let error_msg = "meta leader lease secs should be larger than 2"; - tracing::error!(error_msg); - panic!("{}", error_msg); - } +#[derive(Debug)] +pub enum MetaStoreBackend { + Etcd { + endpoints: Vec, + credentials: Option<(String, String)>, + }, + Mem, } diff --git a/src/meta/src/manager/catalog/database.rs b/src/meta/src/manager/catalog/database.rs index 705474dd27a6d..62b5692ce82ba 100644 --- a/src/meta/src/manager/catalog/database.rs +++ b/src/meta/src/manager/catalog/database.rs @@ -16,10 +16,12 @@ use std::collections::hash_map::Entry; use std::collections::{BTreeMap, HashMap, HashSet}; use itertools::Itertools; +use risingwave_common::bail; use risingwave_common::catalog::TableOption; use risingwave_pb::catalog::table::TableType; use risingwave_pb::catalog::{ - Connection, Database, Function, Index, Schema, Sink, Source, Table, View, + Connection, CreateType, Database, Function, Index, PbStreamJobStatus, Schema, Sink, Source, + StreamJobStatus, Table, View, }; use super::{ConnectionId, DatabaseId, FunctionId, RelationId, 
SchemaId, SinkId, SourceId, ViewId}; @@ -147,10 +149,31 @@ impl DatabaseManager { ( self.databases.values().cloned().collect_vec(), self.schemas.values().cloned().collect_vec(), - self.tables.values().cloned().collect_vec(), + self.tables + .values() + .filter(|t| { + t.stream_job_status == PbStreamJobStatus::Unspecified as i32 + || t.stream_job_status == PbStreamJobStatus::Created as i32 + }) + .cloned() + .collect_vec(), self.sources.values().cloned().collect_vec(), - self.sinks.values().cloned().collect_vec(), - self.indexes.values().cloned().collect_vec(), + self.sinks + .values() + .filter(|t| { + t.stream_job_status == PbStreamJobStatus::Unspecified as i32 + || t.stream_job_status == PbStreamJobStatus::Created as i32 + }) + .cloned() + .collect_vec(), + self.indexes + .values() + .filter(|t| { + t.stream_job_status == PbStreamJobStatus::Unspecified as i32 + || t.stream_job_status == PbStreamJobStatus::Created as i32 + }) + .cloned() + .collect_vec(), self.views.values().cloned().collect_vec(), self.functions.values().cloned().collect_vec(), self.connections.values().cloned().collect_vec(), @@ -173,12 +196,16 @@ impl DatabaseManager { } pub fn check_relation_name_duplicated(&self, relation_key: &RelationKey) -> MetaResult<()> { - if self.tables.values().any(|x| { + if let Some(t) = self.tables.values().find(|x| { x.database_id == relation_key.0 && x.schema_id == relation_key.1 && x.name.eq(&relation_key.2) }) { - Err(MetaError::catalog_duplicated("table", &relation_key.2)) + if t.stream_job_status == StreamJobStatus::Creating as i32 { + bail!("table is in creating procedure: {}", t.id); + } else { + Err(MetaError::catalog_duplicated("table", &relation_key.2)) + } } else if self.sources.values().any(|x| { x.database_id == relation_key.0 && x.schema_id == relation_key.1 @@ -237,9 +264,22 @@ impl DatabaseManager { self.databases.values().cloned().collect_vec() } - pub fn list_creating_tables(&self) -> Vec
{ - self.in_progress_creating_tables + pub fn list_creating_background_mvs(&self) -> Vec
{ + self.tables + .values() + .filter(|&t| { + t.stream_job_status == PbStreamJobStatus::Creating as i32 + && t.table_type == TableType::MaterializedView as i32 + && t.create_type == CreateType::Background as i32 + }) + .cloned() + .collect_vec() + } + + pub fn list_persisted_creating_tables(&self) -> Vec
{ + self.tables .values() + .filter(|&t| t.stream_job_status == PbStreamJobStatus::Creating as i32) .cloned() .collect_vec() } @@ -368,10 +408,12 @@ impl DatabaseManager { .contains(&relation.clone()) } + /// For all types of DDL pub fn mark_creating(&mut self, relation: &RelationKey) { self.in_progress_creation_tracker.insert(relation.clone()); } + /// Only for streaming DDL pub fn mark_creating_streaming_job(&mut self, table_id: TableId, key: RelationKey) { self.in_progress_creation_streaming_job .insert(table_id, key); @@ -396,6 +438,11 @@ impl DatabaseManager { self.in_progress_creation_streaming_job.keys().cloned() } + pub fn clear_creating_stream_jobs(&mut self) { + self.in_progress_creation_tracker.clear(); + self.in_progress_creation_streaming_job.clear(); + } + pub fn mark_creating_tables(&mut self, tables: &[Table]) { self.in_progress_creating_tables .extend(tables.iter().map(|t| (t.id, t.clone()))); diff --git a/src/meta/src/manager/catalog/fragment.rs b/src/meta/src/manager/catalog/fragment.rs index 1a74608c848a1..8b26b8afa11d9 100644 --- a/src/meta/src/manager/catalog/fragment.rs +++ b/src/meta/src/manager/catalog/fragment.rs @@ -43,7 +43,7 @@ use crate::model::{ }; use crate::storage::Transaction; use crate::stream::{SplitAssignment, TableRevision}; -use crate::MetaResult; +use crate::{MetaError, MetaResult}; pub struct FragmentManagerCore { table_fragments: BTreeMap, @@ -163,6 +163,56 @@ impl FragmentManager { map.values().cloned().collect() } + /// The `table_ids` here should correspond to stream jobs. + /// We get their corresponding table fragment, and from there, + /// we get the actors that are in the table fragment. 
+ pub async fn get_table_id_actor_mapping( + &self, + table_ids: &[TableId], + ) -> HashMap> { + let map = &self.core.read().await.table_fragments; + let mut table_map = HashMap::new(); + for table_id in table_ids { + if let Some(table_fragment) = map.get(table_id) { + let mut actors = vec![]; + for fragment in table_fragment.fragments.values() { + for actor in &fragment.actors { + actors.push(actor.actor_id) + } + } + table_map.insert(*table_id, actors); + } + } + table_map + } + + /// Gets the counts for each upstream relation that each stream job + /// indicated by `table_ids` depends on. + /// For example in the following query: + /// ```sql + /// CREATE MATERIALIZED VIEW m1 AS + /// SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t2.b = t3.b + /// ``` + /// + /// We have t1 occurring once, and t2 occurring once. + pub async fn get_upstream_relation_counts( + &self, + table_ids: &[TableId], + ) -> HashMap> { + let map = &self.core.read().await.table_fragments; + let mut upstream_relation_counts = HashMap::new(); + for table_id in table_ids { + if let Some(table_fragments) = map.get(table_id) { + let dependent_ids = table_fragments.dependent_table_ids(); + let r = upstream_relation_counts.insert(*table_id, dependent_ids); + assert!(r.is_none(), "Each table_id should be unique!") + } else { + upstream_relation_counts.insert(*table_id, HashMap::new()); + } + } + upstream_relation_counts + } + pub fn get_mv_id_to_internal_table_ids_mapping(&self) -> Option)>> { match self.core.try_read() { Ok(core) => Some( @@ -231,10 +281,11 @@ impl FragmentManager { table_id: &TableId, ) -> MetaResult { let map = &self.core.read().await.table_fragments; - Ok(map - .get(table_id) - .cloned() - .with_context(|| format!("table_fragment not exist: id={}", table_id))?) 
+ if let Some(table_fragment) = map.get(table_id) { + Ok(table_fragment.clone()) + } else { + Err(MetaError::fragment_not_found(table_id.table_id)) + } } pub async fn select_table_fragments_by_ids( @@ -244,15 +295,32 @@ impl FragmentManager { let map = &self.core.read().await.table_fragments; let mut table_fragments = Vec::with_capacity(table_ids.len()); for table_id in table_ids { - table_fragments.push( - map.get(table_id) - .cloned() - .with_context(|| format!("table_fragment not exist: id={}", table_id))?, - ); + table_fragments.push(if let Some(table_fragment) = map.get(table_id) { + table_fragment.clone() + } else { + return Err(MetaError::fragment_not_found(table_id.table_id)); + }); } Ok(table_fragments) } + pub async fn get_table_id_table_fragment_map( + &self, + table_ids: &[TableId], + ) -> MetaResult> { + let map = &self.core.read().await.table_fragments; + let mut id_to_fragment = HashMap::new(); + for table_id in table_ids { + let table_fragment = if let Some(table_fragment) = map.get(table_id) { + table_fragment.clone() + } else { + return Err(MetaError::fragment_not_found(table_id.table_id)); + }; + id_to_fragment.insert(*table_id, table_fragment); + } + Ok(id_to_fragment) + } + /// Start create a new `TableFragments` and insert it into meta store, currently the actors' /// state is `ActorState::Inactive` and the table fragments' state is `State::Initial`. pub async fn start_create_table_fragments( @@ -499,6 +567,8 @@ impl FragmentManager { /// Drop table fragments info and remove downstream actor infos in fragments from its dependent /// tables. + /// If table fragments already deleted, this should just be noop, + /// the delete function (`table_fragments.remove`) will not return an error. 
pub async fn drop_table_fragments_vec(&self, table_ids: &HashSet) -> MetaResult<()> { let mut guard = self.core.write().await; let current_revision = guard.table_revision; @@ -514,7 +584,7 @@ impl FragmentManager { table_fragments.remove(table_fragment.table_id()); let chain_actor_ids = table_fragment.chain_actor_ids(); let dependent_table_ids = table_fragment.dependent_table_ids(); - for dependent_table_id in dependent_table_ids { + for (dependent_table_id, _) in dependent_table_ids { if table_ids.contains(&dependent_table_id) { continue; } diff --git a/src/meta/src/manager/catalog/mod.rs b/src/meta/src/manager/catalog/mod.rs index 870bda690bfe1..fbbb62e48a626 100644 --- a/src/meta/src/manager/catalog/mod.rs +++ b/src/meta/src/manager/catalog/mod.rs @@ -17,7 +17,7 @@ mod fragment; mod user; mod utils; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::iter; use std::option::Option::Some; use std::sync::Arc; @@ -32,9 +32,10 @@ use risingwave_common::catalog::{ DEFAULT_SUPER_USER_FOR_PG_ID, DEFAULT_SUPER_USER_ID, SYSTEM_SCHEMAS, }; use risingwave_common::{bail, ensure}; -use risingwave_pb::catalog::table::OptionalAssociatedSourceId; +use risingwave_pb::catalog::table::{OptionalAssociatedSourceId, TableType}; use risingwave_pb::catalog::{ - Connection, Database, Function, Index, Schema, Sink, Source, Table, View, + Connection, CreateType, Database, Function, Index, PbStreamJobStatus, Schema, Sink, Source, + StreamJobStatus, Table, View, }; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::user::grant_privilege::{ActionWithGrantOption, Object}; @@ -44,7 +45,7 @@ use tokio::sync::{Mutex, MutexGuard}; use user::*; use crate::manager::{IdCategory, MetaSrvEnv, NotificationVersion, StreamingJob}; -use crate::model::{BTreeMapTransaction, MetadataModel, ValTransaction}; +use crate::model::{BTreeMapTransaction, MetadataModel, TableFragments, ValTransaction}; use 
crate::storage::Transaction; use crate::{MetaError, MetaResult}; @@ -79,7 +80,7 @@ macro_rules! commit_meta_with_trx { async { // Apply the change in `ValTransaction` to trx $( - $val_txn.apply_to_txn(&mut $trx)?; + $val_txn.apply_to_txn(&mut $trx).await?; )* // Commit to meta store $manager.env.meta_store().txn($trx).await?; @@ -115,6 +116,7 @@ use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_common::util::epoch::Epoch; use risingwave_pb::meta::cancel_creating_jobs_request::CreatingJobInfo; use risingwave_pb::meta::relation::RelationInfo; +use risingwave_pb::meta::table_fragments::State; use risingwave_pb::meta::{Relation, RelationGroup}; pub(crate) use {commit_meta, commit_meta_with_trx}; @@ -630,9 +632,13 @@ impl CatalogManager { pub async fn start_create_stream_job_procedure( &self, stream_job: &StreamingJob, + internal_tables: Vec
, ) -> MetaResult<()> { match stream_job { - StreamingJob::MaterializedView(table) => self.start_create_table_procedure(table).await, + StreamingJob::MaterializedView(table) => { + self.start_create_table_procedure(table, internal_tables) + .await + } StreamingJob::Sink(sink) => self.start_create_sink_procedure(sink).await, StreamingJob::Index(index, index_table) => { self.start_create_index_procedure(index, index_table).await @@ -642,7 +648,7 @@ impl CatalogManager { self.start_create_table_procedure_with_source(source, table) .await } else { - self.start_create_table_procedure(table).await + self.start_create_table_procedure(table, vec![]).await } } } @@ -695,7 +701,11 @@ impl CatalogManager { } /// This is used for both `CREATE TABLE` and `CREATE MATERIALIZED VIEW`. - pub async fn start_create_table_procedure(&self, table: &Table) -> MetaResult<()> { + pub async fn start_create_table_procedure( + &self, + table: &Table, + internal_tables: Vec
, + ) -> MetaResult<()> { let core = &mut *self.core.lock().await; let database_core = &mut core.database; let user_core = &mut core.user; @@ -708,43 +718,175 @@ impl CatalogManager { #[cfg(not(test))] user_core.ensure_user_id(table.owner)?; let key = (table.database_id, table.schema_id, table.name.clone()); + database_core.check_relation_name_duplicated(&key)?; - if database_core.has_in_progress_creation(&key) { - bail!("table is in creating procedure"); - } else { - database_core.mark_creating(&key); - database_core.mark_creating_streaming_job(table.id, key); - for &dependent_relation_id in &table.dependent_relations { - database_core.increase_ref_count(dependent_relation_id); + let mut tables = BTreeMapTransaction::new(&mut database_core.tables); + assert!( + !tables.contains_key(&table.id), + "table must not already exist in meta" + ); + for table in internal_tables { + tables.insert(table.id, table); + } + tables.insert(table.id, table.clone()); + commit_meta!(self, tables)?; + + for &dependent_relation_id in &table.dependent_relations { + database_core.increase_ref_count(dependent_relation_id); + } + user_core.increase_ref(table.owner); + Ok(()) + } + + fn assert_table_creating(tables: &BTreeMap, table: &Table) { + if let Some(t) = tables.get(&table.id) + && let Ok(StreamJobStatus::Creating) = t.get_stream_job_status() + {} else { + panic!("Table must be in creating procedure: {table:#?}") + } + } + + pub async fn assert_tables_deleted(&self, table_ids: Vec) { + let core = self.core.lock().await; + let tables = &core.database.tables; + for id in table_ids { + assert_eq!(tables.get(&id), None,) + } + } + + /// We clean the following tables: + /// 1. Those which belonged to incomplete Foreground jobs. + /// 2. Those which did not persist their table fragments, we can't recover these. + /// 3. Those which were only initialized, but not actually running yet. + /// 4. 
From 2, since we don't have internal table ids from the fragments, + /// we can detect hanging table ids by just finding all internal ids + /// with: + /// 1. `stream_job_status` = CREATING + /// 2. Not belonging to a background stream job. + /// Clean up these hanging tables by the id. + pub async fn clean_dirty_tables(&self, fragment_manager: FragmentManagerRef) -> MetaResult<()> { + let creating_tables: Vec
= self.list_persisted_creating_tables().await; + tracing::debug!( + "creating_tables ids: {:#?}", + creating_tables.iter().map(|t| t.id).collect_vec() + ); + let mut reserved_internal_tables = HashSet::new(); + let mut tables_to_clean = vec![]; + let mut internal_tables_to_clean = vec![]; + for table in creating_tables { + tracing::trace!( + "checking table {} definition: {}, create_type: {:#?}, table_type: {:#?}", + table.id, + table.definition, + table.get_create_type().unwrap_or(CreateType::Foreground), + table.get_table_type().unwrap(), + ); + // 1. Incomplete Foreground jobs + if table.create_type == CreateType::Foreground as i32 + && table.table_type != TableType::Internal as i32 + // || table.create_type == CreateType::Unspecified as i32 + { + tracing::debug!("cleaning table_id for foreground: {:#?}", table.id); + tables_to_clean.push(table); + continue; + } + if table.table_type == TableType::Internal as i32 { + internal_tables_to_clean.push(table); + continue; + } + + // 2. No table fragments + assert_ne!(table.table_type, TableType::Internal as i32); + match fragment_manager + .select_table_fragments_by_table_id(&table.id.into()) + .await + { + Err(e) => { + if e.is_fragment_not_found() { + tracing::debug!("cleaning table_id for no fragments: {:#?}", table.id); + tables_to_clean.push(table); + continue; + } else { + return Err(e); + } + } + Ok(fragment) => { + let fragment: TableFragments = fragment; + // 3. For those in initial state (i.e. not running / created), + // we should purge them. + if fragment.state() == State::Initial { + tracing::debug!("cleaning table_id no initial state: {:#?}", table.id); + tables_to_clean.push(table); + continue; + } else { + assert_eq!(table.create_type, CreateType::Background as i32); + // 4. Get all the corresponding internal tables, the rest we can purge. 
+ for id in fragment.internal_table_ids() { + reserved_internal_tables.insert(id); + } + continue; + } + } + } + } + for t in internal_tables_to_clean { + if !reserved_internal_tables.contains(&t.id) { + tracing::debug!( + "cleaning table_id for internal tables not reserved: {:#?}", + t.id + ); + tables_to_clean.push(t); } - user_core.increase_ref(table.owner); - Ok(()) } + + let core = &mut *self.core.lock().await; + let database_core = &mut core.database; + let tables = &mut database_core.tables; + let mut tables = BTreeMapTransaction::new(tables); + for table in &tables_to_clean { + tracing::debug!("cleaning table_id: {}", table.id); + let table = tables.remove(table.id); + assert!(table.is_some()) + } + commit_meta!(self, tables)?; + + database_core.clear_creating_stream_jobs(); + let user_core = &mut core.user; + for table in &tables_to_clean { + // If table type is internal, no need to update the ref count OR + // user ref count. + if table.table_type != TableType::Internal as i32 { + // Recovered when init database manager. + for relation_id in &table.dependent_relations { + database_core.decrease_ref_count(*relation_id); + } + // Recovered when init user manager. + tracing::debug!("decrease ref for {}", table.id); + user_core.decrease_ref(table.owner); + } + } + Ok(()) } /// This is used for both `CREATE TABLE` and `CREATE MATERIALIZED VIEW`. pub async fn finish_create_table_procedure( &self, - internal_tables: Vec
, - table: Table, + mut internal_tables: Vec
, + mut table: Table, ) -> MetaResult { let core = &mut *self.core.lock().await; let database_core = &mut core.database; - let mut tables = BTreeMapTransaction::new(&mut database_core.tables); - let key = (table.database_id, table.schema_id, table.name.clone()); - assert!( - !tables.contains_key(&table.id) - && database_core.in_progress_creation_tracker.contains(&key), - "table must be in creating procedure" - ); - database_core.in_progress_creation_tracker.remove(&key); - database_core - .in_progress_creation_streaming_job - .remove(&table.id); + let tables = &mut database_core.tables; + if cfg!(not(test)) { + Self::assert_table_creating(tables, &table); + } + let mut tables = BTreeMapTransaction::new(tables); + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); - for table in &internal_tables { + for table in &mut internal_tables { + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); } commit_meta!(self, tables)?; @@ -768,23 +910,62 @@ impl CatalogManager { Ok(version) } - pub async fn cancel_create_table_procedure(&self, table: &Table) { - let core = &mut *self.core.lock().await; - let database_core = &mut core.database; - let user_core = &mut core.user; - let key = (table.database_id, table.schema_id, table.name.clone()); - assert!( - !database_core.tables.contains_key(&table.id) - && database_core.has_in_progress_creation(&key), - "table must be in creating procedure" - ); + /// Used to cleanup states in stream manager. + /// It is required because failure may not necessarily happen in barrier, + /// e.g. when cordon nodes. + /// and we still need some way to cleanup the state. 
+ pub async fn cancel_create_table_procedure( + &self, + table_id: TableId, + internal_table_ids: Vec, + ) -> MetaResult<()> { + let table = { + let core = &mut self.core.lock().await; + let database_core = &mut core.database; + let tables = &mut database_core.tables; + let Some(table) = tables.get(&table_id).cloned() else { + tracing::warn!( + "table_id {} missing when attempting to cancel job, could be cleaned on recovery", + table_id + ); + return Ok(()); + }; + table + }; - database_core.unmark_creating(&key); - database_core.unmark_creating_streaming_job(table.id); - for &dependent_relation_id in &table.dependent_relations { - database_core.decrease_ref_count(dependent_relation_id); + tracing::trace!("cleanup tables for {}", table.id); + { + let core = &mut self.core.lock().await; + let database_core = &mut core.database; + + let mut table_ids = vec![table.id]; + table_ids.extend(internal_table_ids); + + let tables = &mut database_core.tables; + let mut tables = BTreeMapTransaction::new(tables); + for table_id in table_ids { + let res = tables.remove(table_id); + assert!(res.is_some()); + } + commit_meta!(self, tables)?; } - user_core.decrease_ref(table.owner); + + { + let core = &mut self.core.lock().await; + { + let user_core = &mut core.user; + user_core.decrease_ref(table.owner); + } + + { + let database_core = &mut core.database; + for &dependent_relation_id in &table.dependent_relations { + database_core.decrease_ref_count(dependent_relation_id); + } + } + } + + Ok(()) } /// return id of streaming jobs in the database which need to be dropped by stream manager. @@ -974,7 +1155,7 @@ impl CatalogManager { match drop_mode { DropMode::Restrict => { return Err(MetaError::permission_denied(format!( - "Fail to delete table `{}` because {} other relation(s) depend on it", + "Fail to delete index table `{}` because {} other relation(s) depend on it", index_table.name, ref_count ))); } @@ -1590,6 +1771,7 @@ impl CatalogManager { // 2. rename index name. 
index.name = index_name.to_string(); index_table.name = index_name.to_string(); + index_table.definition = alter_relation_rename(&index_table.definition, index_name); let mut indexes = BTreeMapTransaction::new(&mut database_core.indexes); let mut tables = BTreeMapTransaction::new(&mut database_core.tables); indexes.insert(index_id, index.clone()); @@ -1731,8 +1913,8 @@ impl CatalogManager { pub async fn finish_create_table_procedure_with_source( &self, source: Source, - mview: Table, - internal_tables: Vec
, + mut mview: Table, + mut internal_tables: Vec
, ) -> MetaResult { let core = &mut *self.core.lock().await; let database_core = &mut core.database; @@ -1763,8 +1945,10 @@ impl CatalogManager { .remove(&mview.id); sources.insert(source.id, source.clone()); + mview.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(mview.id, mview.clone()); - for table in &internal_tables { + for table in &mut internal_tables { + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); } commit_meta!(self, sources, tables)?; @@ -1855,8 +2039,7 @@ impl CatalogManager { let user_core = &mut core.user; let key = (index.database_id, index.schema_id, index.name.clone()); assert!( - !database_core.indexes.contains_key(&index.id) - && database_core.has_in_progress_creation(&key), + !database_core.indexes.contains_key(&index.id), "index must be in creating procedure" ); @@ -1871,9 +2054,9 @@ impl CatalogManager { pub async fn finish_create_index_procedure( &self, - internal_tables: Vec
, - index: Index, - table: Table, + mut internal_tables: Vec
, + mut index: Index, + mut table: Table, ) -> MetaResult { let core = &mut *self.core.lock().await; let database_core = &mut core.database; @@ -1892,10 +2075,13 @@ impl CatalogManager { .in_progress_creation_streaming_job .remove(&table.id); + index.stream_job_status = PbStreamJobStatus::Created.into(); indexes.insert(index.id, index.clone()); + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); - for table in &internal_tables { + for table in &mut internal_tables { + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); } commit_meta!(self, indexes, tables)?; @@ -1956,8 +2142,8 @@ impl CatalogManager { pub async fn finish_create_sink_procedure( &self, - internal_tables: Vec
, - sink: Sink, + mut internal_tables: Vec
, + mut sink: Sink, ) -> MetaResult { let core = &mut *self.core.lock().await; let database_core = &mut core.database; @@ -1975,8 +2161,10 @@ impl CatalogManager { .in_progress_creation_streaming_job .remove(&sink.id); + sink.stream_job_status = PbStreamJobStatus::Created.into(); sinks.insert(sink.id, sink.clone()); - for table in &internal_tables { + for table in &mut internal_tables { + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); } commit_meta!(self, sinks, tables)?; @@ -2006,8 +2194,7 @@ impl CatalogManager { let user_core = &mut core.user; let key = (sink.database_id, sink.schema_id, sink.name.clone()); assert!( - !database_core.sinks.contains_key(&sink.id) - && database_core.has_in_progress_creation(&key), + !database_core.sinks.contains_key(&sink.id), "sink must be in creating procedure" ); @@ -2021,7 +2208,10 @@ impl CatalogManager { } /// This is used for `ALTER TABLE ADD/DROP COLUMN`. - pub async fn start_replace_table_procedure(&self, table: &Table) -> MetaResult<()> { + pub async fn start_replace_table_procedure(&self, stream_job: &StreamingJob) -> MetaResult<()> { + let StreamingJob::Table(source, table) = stream_job else { + unreachable!("unexpected job: {stream_job:?}") + }; let core = &mut *self.core.lock().await; let database_core = &mut core.database; database_core.ensure_database_id(table.database_id)?; @@ -2044,6 +2234,13 @@ impl CatalogManager { if database_core.has_in_progress_creation(&key) { bail!("table is in altering procedure"); } else { + if let Some(source) = source { + let source_key = (source.database_id, source.schema_id, source.name.clone()); + if database_core.has_in_progress_creation(&source_key) { + bail!("source is in altering procedure"); + } + database_core.mark_creating(&source_key); + } database_core.mark_creating(&key); Ok(()) } @@ -2052,20 +2249,38 @@ impl CatalogManager { /// This is used for `ALTER TABLE ADD/DROP COLUMN`. 
pub async fn finish_replace_table_procedure( &self, + source: &Option, table: &Table, table_col_index_mapping: ColIndexMapping, ) -> MetaResult { let core = &mut *self.core.lock().await; let database_core = &mut core.database; let mut tables = BTreeMapTransaction::new(&mut database_core.tables); + let mut sources = BTreeMapTransaction::new(&mut database_core.sources); let mut indexes = BTreeMapTransaction::new(&mut database_core.indexes); let key = (table.database_id, table.schema_id, table.name.clone()); + assert!( tables.contains_key(&table.id) && database_core.in_progress_creation_tracker.contains(&key), "table must exist and be in altering procedure" ); + if let Some(source) = source { + let source_key = (source.database_id, source.schema_id, source.name.clone()); + assert!( + sources.contains_key(&source.id) + && database_core + .in_progress_creation_tracker + .contains(&source_key), + "source must exist and be in altering procedure" + ); + sources.insert(source.id, source.clone()); + database_core + .in_progress_creation_tracker + .remove(&source_key); + } + let index_ids: Vec<_> = indexes .tree_ref() .iter() @@ -2091,8 +2306,10 @@ impl CatalogManager { // TODO: Here we reuse the `creation` tracker for `alter` procedure, as an `alter` must database_core.in_progress_creation_tracker.remove(&key); + let mut table = table.clone(); + table.stream_job_status = PbStreamJobStatus::Created.into(); tables.insert(table.id, table.clone()); - commit_meta!(self, tables, indexes)?; + commit_meta!(self, tables, indexes, sources)?; // Group notification let version = self @@ -2100,9 +2317,12 @@ impl CatalogManager { Operation::Update, Info::RelationGroup(RelationGroup { relations: vec![Relation { - relation_info: RelationInfo::Table(table.to_owned()).into(), + relation_info: RelationInfo::Table(table).into(), }] .into_iter() + .chain(source.iter().map(|source| Relation { + relation_info: RelationInfo::Source(source.to_owned()).into(), + })) 
.chain(updated_indexes.into_iter().map(|index| Relation { relation_info: RelationInfo::Index(index).into(), })) @@ -2115,7 +2335,13 @@ impl CatalogManager { } /// This is used for `ALTER TABLE ADD/DROP COLUMN`. - pub async fn cancel_replace_table_procedure(&self, table: &Table) -> MetaResult<()> { + pub async fn cancel_replace_table_procedure( + &self, + stream_job: &StreamingJob, + ) -> MetaResult<()> { + let StreamingJob::Table(source, table) = stream_job else { + unreachable!("unexpected job: {stream_job:?}") + }; let core = &mut *self.core.lock().await; let database_core = &mut core.database; let key = (table.database_id, table.schema_id, table.name.clone()); @@ -2128,6 +2354,17 @@ impl CatalogManager { "table must exist and must be in altering procedure" ); + if let Some(source) = source { + let source_key = (source.database_id, source.schema_id, source.name.clone()); + assert!( + database_core.sources.contains_key(&source.id) + && database_core.has_in_progress_creation(&source_key), + "source must exist and must be in altering procedure" + ); + + database_core.unmark_creating(&source_key); + } + // TODO: Here we reuse the `creation` tracker for `alter` procedure, as an `alter` must // occur after it's created. We may need to add a new tracker for `alter` procedure.s database_core.unmark_creating(&key); @@ -2146,6 +2383,24 @@ impl CatalogManager { self.core.lock().await.database.list_tables() } + /// Lists table catalogs for mviews, without their internal tables. + pub async fn list_creating_background_mvs(&self) -> Vec
{ + self.core + .lock() + .await + .database + .list_creating_background_mvs() + } + + /// Lists table catalogs for all tables with `stream_job_status=CREATING`. + pub async fn list_persisted_creating_tables(&self) -> Vec
{ + self.core + .lock() + .await + .database + .list_persisted_creating_tables() + } + pub async fn get_all_table_options(&self) -> HashMap { self.core.lock().await.database.get_all_table_options() } diff --git a/src/meta/src/manager/catalog/utils.rs b/src/meta/src/manager/catalog/utils.rs index 7e26e32ee62eb..ea579867fc320 100644 --- a/src/meta/src/manager/catalog/utils.rs +++ b/src/meta/src/manager/catalog/utils.rs @@ -401,7 +401,7 @@ impl ReplaceTableExprRewriter { #[cfg(test)] mod tests { - use crate::manager::catalog::utils::{alter_relation_rename, alter_relation_rename_refs}; + use super::*; #[test] fn test_alter_table_rename() { diff --git a/src/meta/src/manager/cluster.rs b/src/meta/src/manager/cluster.rs index da5b4fce20711..a31979c8871b0 100644 --- a/src/meta/src/manager/cluster.rs +++ b/src/meta/src/manager/cluster.rs @@ -261,7 +261,7 @@ impl ClusterManager { .unwrap() .is_unschedulable = target; - var_txn.apply_to_txn(&mut txn)?; + var_txn.apply_to_txn(&mut txn).await?; var_txns.push(var_txn); } } @@ -314,7 +314,7 @@ impl ClusterManager { worker_id: WorkerId, info: Vec, ) -> MetaResult<()> { - tracing::trace!(target: "events::meta::server_heartbeat", worker_id = worker_id, "receive heartbeat"); + tracing::debug!(target: "events::meta::server_heartbeat", worker_id, "receive heartbeat"); let mut core = self.core.write().await; for worker in core.workers.values_mut() { if worker.worker_id() == worker_id { @@ -557,7 +557,7 @@ impl ClusterManagerCore { worker_id ); - var_txn.apply_to_txn(&mut txn)?; + var_txn.apply_to_txn(&mut txn).await?; var_txns.push(var_txn); } } @@ -728,11 +728,8 @@ mod tests { async fn test_cluster_manager() -> MetaResult<()> { let env = MetaSrvEnv::for_test().await; - let cluster_manager = Arc::new( - ClusterManager::new(env.clone(), Duration::new(0, 0)) - .await - .unwrap(), - ); + let cluster_manager = + Arc::new(ClusterManager::new(env, Duration::new(0, 0)).await.unwrap()); let mut worker_nodes = Vec::new(); let worker_count = 
5usize; @@ -839,11 +836,8 @@ mod tests { async fn test_cluster_manager_schedulability() -> MetaResult<()> { let env = MetaSrvEnv::for_test().await; - let cluster_manager = Arc::new( - ClusterManager::new(env.clone(), Duration::new(0, 0)) - .await - .unwrap(), - ); + let cluster_manager = + Arc::new(ClusterManager::new(env, Duration::new(0, 0)).await.unwrap()); let worker_node = cluster_manager .add_worker_node( WorkerType::ComputeNode, diff --git a/src/meta/src/manager/env.rs b/src/meta/src/manager/env.rs index e8a53f1db8359..16a4bcb248b23 100644 --- a/src/meta/src/manager/env.rs +++ b/src/meta/src/manager/env.rs @@ -18,16 +18,20 @@ use std::sync::Arc; use risingwave_common::config::{CompactionConfig, DefaultParallelism}; use risingwave_pb::meta::SystemParams; use risingwave_rpc_client::{ConnectorClient, StreamClientPool, StreamClientPoolRef}; +use sea_orm::EntityTrait; use super::{SystemParamsManager, SystemParamsManagerRef}; +use crate::controller::system_param::{SystemParamsController, SystemParamsControllerRef}; +use crate::controller::SqlMetaStore; use crate::manager::{ IdGeneratorManager, IdGeneratorManagerRef, IdleManager, IdleManagerRef, NotificationManager, NotificationManagerRef, }; use crate::model::ClusterId; +use crate::model_v2::prelude::Cluster; +use crate::storage::MetaStoreRef; #[cfg(any(test, feature = "test"))] -use crate::storage::MemStore; -use crate::storage::{MetaStoreBoxExt, MetaStoreRef}; +use crate::storage::{MemStore, MetaStoreBoxExt}; use crate::MetaResult; /// [`MetaSrvEnv`] is the global environment in Meta service. The instance will be shared by all @@ -40,6 +44,9 @@ pub struct MetaSrvEnv { /// meta store. meta_store: MetaStoreRef, + /// sql meta store. + meta_store_sql: Option, + /// notification manager. notification_manager: NotificationManagerRef, @@ -52,6 +59,9 @@ pub struct MetaSrvEnv { /// system param manager. system_params_manager: SystemParamsManagerRef, + /// system param controller. 
+ system_params_controller: Option, + /// Unique identifier of the cluster. cluster_id: ClusterId, @@ -205,13 +215,14 @@ impl MetaSrvEnv { opts: MetaOpts, init_system_params: SystemParams, meta_store: MetaStoreRef, + meta_store_sql: Option, ) -> MetaResult { // change to sync after refactor `IdGeneratorManager::new` sync. let id_gen_manager = Arc::new(IdGeneratorManager::new(meta_store.clone()).await); let stream_client_pool = Arc::new(StreamClientPool::default()); let notification_manager = Arc::new(NotificationManager::new(meta_store.clone()).await); let idle_manager = Arc::new(IdleManager::new(opts.max_idle_ms)); - let (cluster_id, cluster_first_launch) = + let (mut cluster_id, cluster_first_launch) = if let Some(id) = ClusterId::from_meta_store(&meta_store).await? { (id, false) } else { @@ -221,21 +232,43 @@ impl MetaSrvEnv { SystemParamsManager::new( meta_store.clone(), notification_manager.clone(), - init_system_params, + init_system_params.clone(), cluster_first_launch, ) .await?, ); + // TODO: remove `cluster_first_launch` and check equality of cluster id stored in hummock to + // make sure the data dir of hummock is not used by another cluster. + let system_params_controller = match &meta_store_sql { + Some(store) => { + cluster_id = Cluster::find() + .one(&store.conn) + .await? 
+ .map(|c| c.cluster_id.to_string().into()) + .unwrap(); + Some(Arc::new( + SystemParamsController::new( + store.clone(), + notification_manager.clone(), + init_system_params, + ) + .await?, + )) + } + None => None, + }; let connector_client = ConnectorClient::try_new(opts.connector_rpc_endpoint.as_ref()).await; Ok(Self { id_gen_manager, meta_store, + meta_store_sql, notification_manager, stream_client_pool, idle_manager, system_params_manager, + system_params_controller, cluster_id, cluster_first_launch, connector_client, @@ -251,6 +284,10 @@ impl MetaSrvEnv { &self.meta_store } + pub fn sql_meta_store(&self) -> Option { + self.meta_store_sql.clone() + } + pub fn id_gen_manager_ref(&self) -> IdGeneratorManagerRef { self.id_gen_manager.clone() } @@ -283,6 +320,14 @@ impl MetaSrvEnv { self.system_params_manager.deref() } + pub fn system_params_controller_ref(&self) -> Option { + self.system_params_controller.clone() + } + + pub fn system_params_controller(&self) -> Option<&SystemParamsControllerRef> { + self.system_params_controller.as_ref() + } + pub fn stream_client_pool_ref(&self) -> StreamClientPoolRef { self.stream_client_pool.clone() } @@ -314,6 +359,11 @@ impl MetaSrvEnv { pub async fn for_test_opts(opts: Arc) -> Self { // change to sync after refactor `IdGeneratorManager::new` sync. 
let meta_store = MemStore::default().into_ref(); + #[cfg(madsim)] + let meta_store_sql: Option = None; + #[cfg(not(madsim))] + let meta_store_sql = Some(SqlMetaStore::for_test().await); + let id_gen_manager = Arc::new(IdGeneratorManager::new(meta_store.clone()).await); let notification_manager = Arc::new(NotificationManager::new(meta_store.clone()).await); let stream_client_pool = Arc::new(StreamClientPool::default()); @@ -329,14 +379,29 @@ impl MetaSrvEnv { .await .unwrap(), ); + let system_params_controller = if let Some(store) = &meta_store_sql { + Some(Arc::new( + SystemParamsController::new( + store.clone(), + notification_manager.clone(), + risingwave_common::system_param::system_params_for_test(), + ) + .await + .unwrap(), + )) + } else { + None + }; Self { id_gen_manager, meta_store, + meta_store_sql, notification_manager, stream_client_pool, idle_manager, system_params_manager, + system_params_controller, cluster_id, cluster_first_launch, connector_client: None, diff --git a/src/meta/src/manager/mod.rs b/src/meta/src/manager/mod.rs index 6f787dba23d09..35642ed0ec143 100644 --- a/src/meta/src/manager/mod.rs +++ b/src/meta/src/manager/mod.rs @@ -18,18 +18,17 @@ mod env; mod id; mod idle; mod notification; -pub(crate) mod sink_coordination; +pub mod sink_coordination; mod streaming_job; mod system_param; -pub(crate) use catalog::*; -pub use cluster::WorkerKey; -pub(crate) use cluster::*; -pub use env::MetaSrvEnv; -pub(crate) use env::*; -pub(crate) use id::*; -pub(crate) use idle::*; -pub(crate) use notification::*; -pub use notification::{LocalNotification, MessageStatus, NotificationManagerRef}; -pub(crate) use streaming_job::*; -pub(crate) use system_param::*; +pub use catalog::*; +pub use cluster::{WorkerKey, *}; +pub use env::{MetaSrvEnv, *}; +pub use id::*; +pub use idle::*; +pub use notification::{LocalNotification, MessageStatus, NotificationManagerRef, *}; +pub use streaming_job::*; +pub use system_param::*; + +pub use super::model_v2::prelude; diff 
--git a/src/meta/src/manager/notification.rs b/src/meta/src/manager/notification.rs index 5e4172911ba70..96c3c17ba59cd 100644 --- a/src/meta/src/manager/notification.rs +++ b/src/meta/src/manager/notification.rs @@ -18,7 +18,6 @@ use std::sync::Arc; use risingwave_common::system_param::reader::SystemParamsReader; use risingwave_pb::common::{WorkerNode, WorkerType}; -use risingwave_pb::hummock::CompactTask; use risingwave_pb::meta::relation::RelationInfo; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::meta::{ @@ -36,12 +35,13 @@ pub type MessageStatus = Status; pub type Notification = Result; pub type NotificationManagerRef = Arc; pub type NotificationVersion = u64; +/// NOTE(kwannoel): This is just ignored, used in background DDL +pub const IGNORED_NOTIFICATION_VERSION: u64 = 0; #[derive(Clone, Debug)] pub enum LocalNotification { WorkerNodeDeleted(WorkerNode), WorkerNodeActivated(WorkerNode), - CompactionTaskNeedCancel(CompactTask), SystemParamsChange(SystemParamsReader), FragmentMappingsUpsert(Vec), FragmentMappingsDelete(Vec), diff --git a/src/meta/src/manager/sink_coordination/coordinator_worker.rs b/src/meta/src/manager/sink_coordination/coordinator_worker.rs index 1a7c42c108661..79f4f5b753aa2 100644 --- a/src/meta/src/manager/sink_coordination/coordinator_worker.rs +++ b/src/meta/src/manager/sink_coordination/coordinator_worker.rs @@ -30,7 +30,6 @@ use risingwave_pb::connector_service::coordinate_response::{ use risingwave_pb::connector_service::{ coordinate_request, coordinate_response, CoordinateRequest, CoordinateResponse, SinkMetadata, }; -use risingwave_rpc_client::ConnectorClient; use tokio::sync::mpsc::UnboundedReceiver; use tonic::Status; use tracing::{error, warn}; @@ -47,7 +46,7 @@ macro_rules! 
send_await_with_err_check { }; } -pub(crate) struct CoordinatorWorker { +pub struct CoordinatorWorker { param: SinkParam, request_streams: Vec, response_senders: Vec, @@ -55,10 +54,9 @@ pub(crate) struct CoordinatorWorker { } impl CoordinatorWorker { - pub(crate) async fn run( + pub async fn run( first_writer_request: NewSinkWriterRequest, request_rx: UnboundedReceiver, - connector_client: Option, ) { let sink = match build_sink(first_writer_request.param.clone()) { Ok(sink) => sink, @@ -75,7 +73,7 @@ impl CoordinatorWorker { } }; dispatch_sink!(sink, sink, { - let coordinator = match sink.new_coordinator(connector_client).await { + let coordinator = match sink.new_coordinator().await { Ok(coordinator) => coordinator, Err(e) => { error!( @@ -93,7 +91,7 @@ impl CoordinatorWorker { }); } - pub(crate) async fn execute_coordinator( + pub async fn execute_coordinator( first_writer_request: NewSinkWriterRequest, request_rx: UnboundedReceiver, coordinator: impl SinkCommitCoordinator, @@ -168,7 +166,7 @@ impl CoordinatorWorker { registered_vnode.insert(vnode); } - loop { + while remaining_count > 0 { let new_writer_request = self.next_new_writer().await?; if self.param != new_writer_request.param { // TODO: may return error. 
@@ -191,10 +189,6 @@ impl CoordinatorWorker { registered_vnode.insert(vnode); remaining_count -= 1; } - - if remaining_count == 0 { - break; - } } self.send_to_all_sink_writers(|| { diff --git a/src/meta/src/manager/sink_coordination/manager.rs b/src/meta/src/manager/sink_coordination/manager.rs index b38d70119e576..720a698fa8e72 100644 --- a/src/meta/src/manager/sink_coordination/manager.rs +++ b/src/meta/src/manager/sink_coordination/manager.rs @@ -25,7 +25,6 @@ use risingwave_connector::sink::catalog::SinkId; use risingwave_connector::sink::SinkParam; use risingwave_pb::connector_service::coordinate_request::Msg; use risingwave_pb::connector_service::{coordinate_request, CoordinateRequest, CoordinateResponse}; -use risingwave_rpc_client::ConnectorClient; use tokio::sync::mpsc; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio::sync::oneshot::{channel, Receiver, Sender}; @@ -70,28 +69,21 @@ pub struct SinkCoordinatorManager { } impl SinkCoordinatorManager { - pub(crate) fn start_worker( - connector_client: Option, - ) -> (Self, (JoinHandle<()>, Sender<()>)) { - Self::start_worker_with_spawn_worker( - connector_client, - |writer_request, manager_request_stream, connector_client| { - tokio::spawn(CoordinatorWorker::run( - writer_request, - manager_request_stream, - connector_client, - )) - }, - ) + pub fn start_worker() -> (Self, (JoinHandle<()>, Sender<()>)) { + Self::start_worker_with_spawn_worker(|writer_request, manager_request_stream| { + tokio::spawn(CoordinatorWorker::run( + writer_request, + manager_request_stream, + )) + }) } fn start_worker_with_spawn_worker( - connector_client: Option, spawn_coordinator_worker: impl SpawnCoordinatorFn, ) -> (Self, (JoinHandle<()>, Sender<()>)) { let (request_tx, request_rx) = mpsc::channel(BOUNDED_CHANNEL_SIZE); let (shutdown_tx, shutdown_rx) = channel(); - let worker = ManagerWorker::new(request_rx, shutdown_rx, connector_client); + let worker = ManagerWorker::new(request_rx, 
shutdown_rx); let join_handle = tokio::spawn(worker.execute(spawn_coordinator_worker)); ( SinkCoordinatorManager { request_tx }, @@ -99,7 +91,7 @@ impl SinkCoordinatorManager { ) } - pub(crate) async fn handle_new_request( + pub async fn handle_new_request( &self, mut request_stream: SinkWriterRequestStream, ) -> Result>, Status> { @@ -151,11 +143,11 @@ impl SinkCoordinatorManager { info!("successfully stop coordinator: {:?}", sink_id); } - pub(crate) async fn reset(&self) { + pub async fn reset(&self) { self.stop_coordinator(None).await; } - pub(crate) async fn stop_sink_coordinator(&self, sink_id: SinkId) { + pub async fn stop_sink_coordinator(&self, sink_id: SinkId) { self.stop_coordinator(Some(sink_id)).await; } } @@ -168,7 +160,6 @@ struct CoordinatorWorkerHandle { } struct ManagerWorker { - connector_client: Option, request_rx: mpsc::Receiver, // Make it option so that it can be polled with &mut SinkManagerWorker shutdown_rx: Option>, @@ -186,26 +177,17 @@ enum ManagerEvent { }, } -trait SpawnCoordinatorFn = FnMut( - NewSinkWriterRequest, - UnboundedReceiver, - Option, - ) -> JoinHandle<()> +trait SpawnCoordinatorFn = FnMut(NewSinkWriterRequest, UnboundedReceiver) -> JoinHandle<()> + Send + 'static; impl ManagerWorker { - fn new( - request_rx: mpsc::Receiver, - shutdown_rx: Receiver<()>, - connector_client: Option, - ) -> Self { + fn new(request_rx: mpsc::Receiver, shutdown_rx: Receiver<()>) -> Self { ManagerWorker { request_rx, shutdown_rx: Some(shutdown_rx), running_coordinator_worker_join_handles: Default::default(), running_coordinator_worker: Default::default(), - connector_client, } } @@ -346,8 +328,7 @@ impl ManagerWorker { } Entry::Vacant(entry) => { let (request_tx, request_rx) = unbounded_channel(); - let connector_client = self.connector_client.clone(); - let join_handle = spawn_coordinator_worker(request, request_rx, connector_client); + let join_handle = spawn_coordinator_worker(request, request_rx); 
self.running_coordinator_worker_join_handles.push( join_handle .map(move |join_result| (sink_id, join_result)) @@ -420,8 +401,9 @@ mod tests { sink_id, properties: Default::default(), columns: vec![], - pk_indices: vec![], + downstream_pk: vec![], sink_type: SinkType::AppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; @@ -448,10 +430,10 @@ mod tests { ]; let (manager, (_join_handle, _stop_tx)) = - SinkCoordinatorManager::start_worker_with_spawn_worker(None, { + SinkCoordinatorManager::start_worker_with_spawn_worker({ let param = param.clone(); let metadata = metadata.clone(); - move |first_request: NewSinkWriterRequest, new_writer_rx, _| { + move |first_request: NewSinkWriterRequest, new_writer_rx| { let param = param.clone(); let metadata = metadata.clone(); tokio::spawn(async move { @@ -586,6 +568,129 @@ mod tests { .await; } + #[tokio::test] + async fn test_single_writer() { + let sink_id = SinkId::from(1); + let param = SinkParam { + sink_id, + properties: Default::default(), + columns: vec![], + downstream_pk: vec![], + sink_type: SinkType::AppendOnly, + format_desc: None, + db_name: "test".into(), + sink_from_name: "test".into(), + }; + + let epoch1 = 233; + let epoch2 = 234; + + let all_vnode = (0..VirtualNode::COUNT).collect_vec(); + let build_bitmap = |indexes: &[usize]| { + let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); + for i in indexes { + builder.set(*i, true); + } + builder.finish() + }; + let vnode = build_bitmap(&all_vnode); + + let metadata = [vec![1u8, 2u8], vec![3u8, 4u8]]; + + let (manager, (_join_handle, _stop_tx)) = + SinkCoordinatorManager::start_worker_with_spawn_worker({ + let param = param.clone(); + let metadata = metadata.clone(); + move |first_request: NewSinkWriterRequest, new_writer_rx| { + let param = param.clone(); + let metadata = metadata.clone(); + tokio::spawn(async move { + // validate the start request + assert_eq!(first_request.param, param); + 
CoordinatorWorker::execute_coordinator( + first_request, + new_writer_rx, + MockCoordinator::new(0, |epoch, metadata_list, count: &mut usize| { + *count += 1; + let mut metadata_list = metadata_list + .into_iter() + .map(|metadata| match metadata { + SinkMetadata { + metadata: + Some(Metadata::Serialized(SerializedMetadata { + metadata, + })), + } => metadata, + _ => unreachable!(), + }) + .collect_vec(); + metadata_list.sort(); + match *count { + 1 => { + assert_eq!(epoch, epoch1); + assert_eq!(1, metadata_list.len()); + assert_eq!(metadata[0], metadata_list[0]); + } + 2 => { + assert_eq!(epoch, epoch2); + assert_eq!(1, metadata_list.len()); + assert_eq!(metadata[1], metadata_list[0]); + } + _ => unreachable!(), + } + Ok(()) + }), + ) + .await; + }) + } + }); + + let build_client = |vnode| async { + CoordinatorStreamHandle::new_with_init_stream( + param.to_proto(), + vnode, + |stream_req| async { + Ok(tonic::Response::new( + manager + .handle_new_request(stream_req.into_inner().map(Ok).boxed()) + .await + .unwrap() + .boxed(), + )) + }, + ) + .await + .unwrap() + }; + + let mut client = build_client(vnode).await; + + client + .commit( + epoch1, + SinkMetadata { + metadata: Some(Metadata::Serialized(SerializedMetadata { + metadata: metadata[0].clone(), + })), + }, + ) + .await + .unwrap(); + + client + .commit( + epoch2, + SinkMetadata { + metadata: Some(Metadata::Serialized(SerializedMetadata { + metadata: metadata[1].clone(), + })), + }, + ) + .await + .unwrap(); + } + #[tokio::test] async fn test_drop_sink_while_init() { let sink_id = SinkId::from(1); @@ -593,13 +698,14 @@ mod tests { sink_id, properties: Default::default(), columns: vec![], - pk_indices: vec![], + downstream_pk: vec![], sink_type: SinkType::AppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; - let (manager, (_join_handle, _stop_tx)) = SinkCoordinatorManager::start_worker(None); + let (manager, (_join_handle, _stop_tx)) = 
SinkCoordinatorManager::start_worker(); let mut build_client_future1 = pin!(CoordinatorStreamHandle::new_with_init_stream( param.to_proto(), @@ -631,8 +737,9 @@ mod tests { sink_id, properties: Default::default(), columns: vec![], - pk_indices: vec![], + downstream_pk: vec![], sink_type: SinkType::AppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; @@ -653,9 +760,9 @@ mod tests { let vnode2 = build_bitmap(second); let (manager, (_join_handle, _stop_tx)) = - SinkCoordinatorManager::start_worker_with_spawn_worker(None, { + SinkCoordinatorManager::start_worker_with_spawn_worker({ let param = param.clone(); - move |first_request: NewSinkWriterRequest, new_writer_rx, _| { + move |first_request: NewSinkWriterRequest, new_writer_rx| { let param = param.clone(); tokio::spawn(async move { // validate the start request @@ -713,8 +820,9 @@ mod tests { sink_id, properties: Default::default(), columns: vec![], - pk_indices: vec![], + downstream_pk: vec![], sink_type: SinkType::AppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; @@ -735,9 +843,9 @@ mod tests { let vnode2 = build_bitmap(second); let (manager, (_join_handle, _stop_tx)) = - SinkCoordinatorManager::start_worker_with_spawn_worker(None, { + SinkCoordinatorManager::start_worker_with_spawn_worker({ let param = param.clone(); - move |first_request: NewSinkWriterRequest, new_writer_rx, _| { + move |first_request: NewSinkWriterRequest, new_writer_rx| { let param = param.clone(); tokio::spawn(async move { // validate the start request diff --git a/src/meta/src/manager/sink_coordination/mod.rs b/src/meta/src/manager/sink_coordination/mod.rs index 30786c8721e97..fe861e2175343 100644 --- a/src/meta/src/manager/sink_coordination/mod.rs +++ b/src/meta/src/manager/sink_coordination/mod.rs @@ -16,19 +16,19 @@ mod coordinator_worker; mod manager; use futures::stream::BoxStream; -pub(crate) use manager::SinkCoordinatorManager; +pub use 
manager::SinkCoordinatorManager; use risingwave_common::buffer::Bitmap; use risingwave_connector::sink::SinkParam; use risingwave_pb::connector_service::{CoordinateRequest, CoordinateResponse}; use tokio::sync::mpsc::Sender; use tonic::Status; -pub(crate) type SinkWriterRequestStream = BoxStream<'static, Result>; -pub(crate) type SinkCoordinatorResponseSender = Sender>; +pub type SinkWriterRequestStream = BoxStream<'static, Result>; +pub type SinkCoordinatorResponseSender = Sender>; -pub(crate) struct NewSinkWriterRequest { - pub(crate) request_stream: SinkWriterRequestStream, - pub(crate) response_tx: SinkCoordinatorResponseSender, - pub(crate) param: SinkParam, - pub(crate) vnode_bitmap: Bitmap, +pub struct NewSinkWriterRequest { + pub request_stream: SinkWriterRequestStream, + pub response_tx: SinkCoordinatorResponseSender, + pub param: SinkParam, + pub vnode_bitmap: Bitmap, } diff --git a/src/meta/src/manager/streaming_job.rs b/src/meta/src/manager/streaming_job.rs index 6b3e71fe20092..e02388eba4f3d 100644 --- a/src/meta/src/manager/streaming_job.rs +++ b/src/meta/src/manager/streaming_job.rs @@ -16,7 +16,7 @@ use std::collections::HashMap; use risingwave_common::catalog::TableVersionId; use risingwave_common::util::epoch::Epoch; -use risingwave_pb::catalog::{Index, Sink, Source, Table}; +use risingwave_pb::catalog::{CreateType, Index, Sink, Source, Table}; use crate::model::FragmentId; @@ -31,7 +31,7 @@ pub enum StreamingJob { } impl StreamingJob { - pub(crate) fn mark_created(&mut self) { + pub fn mark_created(&mut self) { let created_at_epoch = Some(Epoch::now().0); match self { StreamingJob::MaterializedView(table) => table.created_at_epoch = created_at_epoch, @@ -48,7 +48,7 @@ impl StreamingJob { } } - pub(crate) fn mark_initialized(&mut self) { + pub fn mark_initialized(&mut self) { let initialized_at_epoch = Some(Epoch::now().0); match self { StreamingJob::MaterializedView(table) => { @@ -197,4 +197,13 @@ impl StreamingJob { None } } + + pub fn 
create_type(&self) -> CreateType { + match self { + Self::MaterializedView(table) => { + table.get_create_type().unwrap_or(CreateType::Foreground) + } + _ => CreateType::Foreground, + } + } } diff --git a/src/meta/src/manager/system_param/mod.rs b/src/meta/src/manager/system_param/mod.rs index 861234bdfe9fe..eb24e0db0f340 100644 --- a/src/meta/src/manager/system_param/mod.rs +++ b/src/meta/src/manager/system_param/mod.rs @@ -21,7 +21,7 @@ use std::time::Duration; use anyhow::anyhow; use risingwave_common::system_param::reader::SystemParamsReader; use risingwave_common::system_param::{check_missing_params, set_system_param}; -use risingwave_common::{for_all_undeprecated_params, key_of}; +use risingwave_common::{for_all_params, key_of}; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::meta::SystemParams; use tokio::sync::oneshot::Sender; @@ -89,7 +89,7 @@ impl SystemParamsManager { set_system_param(mem_txn.deref_mut(), name, value).map_err(MetaError::system_param)?; let mut store_txn = Transaction::default(); - mem_txn.apply_to_txn(&mut store_txn)?; + mem_txn.apply_to_txn(&mut store_txn).await?; self.meta_store.txn(store_txn).await?; mem_txn.commit(); @@ -182,4 +182,4 @@ macro_rules! impl_merge_params { }; } -for_all_undeprecated_params!(impl_merge_params); +for_all_params!(impl_merge_params); diff --git a/src/meta/src/manager/system_param/model.rs b/src/meta/src/manager/system_param/model.rs index bed4f3d86e8a4..d486d6a5d74c6 100644 --- a/src/meta/src/manager/system_param/model.rs +++ b/src/meta/src/manager/system_param/model.rs @@ -67,20 +67,21 @@ impl SystemParamsModel for SystemParams { S: MetaStore, { let mut txn = Transaction::default(); - self.upsert_in_transaction(&mut txn)?; + self.upsert_in_transaction(&mut txn).await?; Ok(store.txn(txn).await?) 
} } -impl Transactional for SystemParams { - fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { +#[async_trait] +impl Transactional for SystemParams { + async fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { for (k, v) in system_params_to_kv(self).map_err(MetadataModelError::internal)? { trx.put(Self::cf_name(), k.into_bytes(), v.into_bytes()); } Ok(()) } - fn delete_in_transaction(&self, _trx: &mut Transaction) -> MetadataModelResult<()> { + async fn delete_in_transaction(&self, _trx: &mut Transaction) -> MetadataModelResult<()> { unreachable!() } } diff --git a/src/meta/src/model/cluster.rs b/src/meta/src/model/cluster.rs index 882f48b6dc8c4..3d654a1d6b8c9 100644 --- a/src/meta/src/model/cluster.rs +++ b/src/meta/src/model/cluster.rs @@ -128,8 +128,14 @@ const CLUSTER_ID_KEY: &[u8] = "cluster_id".as_bytes(); #[derive(Clone, Debug)] pub struct ClusterId(String); +impl Default for ClusterId { + fn default() -> Self { + Self::new() + } +} + impl ClusterId { - pub(crate) fn new() -> Self { + pub fn new() -> Self { Self(Uuid::new_v4().to_string()) } @@ -139,15 +145,13 @@ impl ClusterId { )) } - pub(crate) async fn from_meta_store( + pub async fn from_meta_store( meta_store: &S, ) -> MetadataModelResult> { Self::from_snapshot::(&meta_store.snapshot().await).await } - pub(crate) async fn from_snapshot( - s: &S::Snapshot, - ) -> MetadataModelResult> { + pub async fn from_snapshot(s: &S::Snapshot) -> MetadataModelResult> { match s.get_cf(CLUSTER_ID_CF_NAME, CLUSTER_ID_KEY).await { Ok(bytes) => Ok(Some(Self::from_bytes(bytes)?)), Err(e) => match e { @@ -157,10 +161,7 @@ impl ClusterId { } } - pub(crate) async fn put_at_meta_store( - &self, - meta_store: &S, - ) -> MetadataModelResult<()> { + pub async fn put_at_meta_store(&self, meta_store: &S) -> MetadataModelResult<()> { Ok(meta_store .put_cf( CLUSTER_ID_CF_NAME, diff --git a/src/meta/src/model/mod.rs b/src/meta/src/model/mod.rs index 
bb07e7e7b6cf1..f1fe0285d9ae8 100644 --- a/src/meta/src/model/mod.rs +++ b/src/meta/src/model/mod.rs @@ -24,6 +24,7 @@ mod user; use std::collections::btree_map::{Entry, VacantEntry}; use std::collections::BTreeMap; use std::fmt::Debug; +use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use async_trait::async_trait; @@ -48,9 +49,10 @@ pub type DispatcherId = u64; /// A global, unique identifier of a fragment pub type FragmentId = u32; -pub trait Transactional { - fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()>; - fn delete_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()>; +#[async_trait] +pub trait Transactional { + async fn upsert_in_transaction(&self, trx: &mut TXN) -> MetadataModelResult<()>; + async fn delete_in_transaction(&self, trx: &mut TXN) -> MetadataModelResult<()>; } mod private { @@ -203,11 +205,12 @@ for_all_metadata_models!(impl_metadata_model_marker); /// `Transactional` defines operations supported in a transaction. /// Read operations can be supported if necessary. -impl Transactional for T +#[async_trait] +impl Transactional for T where - T: MetadataModel, + T: MetadataModel + Sync, { - fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { + async fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { trx.put( Self::cf_name(), self.key()?.encode_to_vec(), @@ -216,7 +219,7 @@ where Ok(()) } - fn delete_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { + async fn delete_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { trx.delete(Self::cf_name(), self.key()?.encode_to_vec()); Ok(()) } @@ -225,11 +228,12 @@ where /// Trait that wraps a local memory value and applies the change to the local memory value on /// `commit` or leaves the local memory value untouched on `abort`. 
pub trait ValTransaction: Sized { + type TXN; /// Commit the change to local memory value fn commit(self); /// Apply the change (upsert or delete) to `txn` - fn apply_to_txn(&self, txn: &mut Transaction) -> MetadataModelResult<()>; + async fn apply_to_txn(&self, txn: &mut Self::TXN) -> MetadataModelResult<()>; /// Abort the `VarTransaction` and leave the local memory value untouched fn abort(self) { @@ -243,26 +247,28 @@ pub trait ValTransaction: Sized { /// When `commit` is called, the change to `new_value` will be applied to the `orig_value_ref` /// When `abort` is called, the `VarTransaction` is dropped and the local memory value is /// untouched. -pub struct VarTransaction<'a, T: Transactional> { +pub struct VarTransaction<'a, TXN, T: Transactional> { orig_value_ref: &'a mut T, new_value: Option, + _phantom: PhantomData, } -impl<'a, T> VarTransaction<'a, T> +impl<'a, TXN, T> VarTransaction<'a, TXN, T> where - T: Transactional, + T: Transactional, { /// Create a `VarTransaction` that wraps a raw variable - pub fn new(val_ref: &'a mut T) -> VarTransaction<'a, T> { + pub fn new(val_ref: &'a mut T) -> VarTransaction<'a, TXN, T> { VarTransaction { // lazy initialization new_value: None, orig_value_ref: val_ref, + _phantom: PhantomData, } } } -impl<'a, T: Transactional> Deref for VarTransaction<'a, T> { +impl<'a, TXN, T: Transactional> Deref for VarTransaction<'a, TXN, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -273,9 +279,9 @@ impl<'a, T: Transactional> Deref for VarTransaction<'a, T> { } } -impl<'a, T> DerefMut for VarTransaction<'a, T> +impl<'a, TXN, T> DerefMut for VarTransaction<'a, TXN, T> where - T: Clone + Transactional, + T: Clone + Transactional, { fn deref_mut(&mut self) -> &mut Self::Target { if self.new_value.is_none() { @@ -285,21 +291,23 @@ where } } -impl<'a, T> ValTransaction for VarTransaction<'a, T> +impl<'a, TXN, T> ValTransaction for VarTransaction<'a, TXN, T> where - T: Transactional + PartialEq, + T: Transactional + PartialEq, 
{ + type TXN = TXN; + fn commit(self) { if let Some(new_value) = self.new_value { *self.orig_value_ref = new_value; } } - fn apply_to_txn(&self, txn: &mut Transaction) -> MetadataModelResult<()> { + async fn apply_to_txn(&self, txn: &mut Self::TXN) -> MetadataModelResult<()> { if let Some(new_value) = &self.new_value { // Apply the change to `txn` only when the value is modified if *self.orig_value_ref != *new_value { - new_value.upsert_in_transaction(txn) + new_value.upsert_in_transaction(txn).await } else { Ok(()) } @@ -418,25 +426,27 @@ enum BTreeMapOp { /// are stored in `staging`. On `commit`, it will apply the changes stored in `staging` to the in /// memory btree map. When serve `get` and `get_mut`, it merges the value stored in `staging` and /// `tree_ref`. -pub struct BTreeMapTransaction<'a, K: Ord, V> { +pub struct BTreeMapTransaction<'a, K: Ord, V, TXN = Transaction> { /// A reference to the original `BTreeMap`. All access to this field should be immutable, /// except when we commit the staging changes to the original map. 
tree_ref: &'a mut BTreeMap, /// Store all the staging changes that will be applied to the original map on commit staging: BTreeMap>, + _phantom: PhantomData, } -impl<'a, K: Ord + Debug, V: Clone> BTreeMapTransaction<'a, K, V> { - pub fn new(tree_ref: &'a mut BTreeMap) -> BTreeMapTransaction<'a, K, V> { +impl<'a, K: Ord + Debug, V: Clone, TXN> BTreeMapTransaction<'a, K, V, TXN> { + pub fn new(tree_ref: &'a mut BTreeMap) -> BTreeMapTransaction<'a, K, V, TXN> { Self { tree_ref, staging: BTreeMap::default(), + _phantom: PhantomData, } } /// Start a `BTreeMapEntryTransaction` when the `key` exists #[allow(dead_code)] - pub fn new_entry_txn(&mut self, key: K) -> Option> { + pub fn new_entry_txn(&mut self, key: K) -> Option> { BTreeMapEntryTransaction::new(self.tree_ref, key, None) } @@ -447,13 +457,17 @@ impl<'a, K: Ord + Debug, V: Clone> BTreeMapTransaction<'a, K, V> { &mut self, key: K, default_val: V, - ) -> BTreeMapEntryTransaction<'_, K, V> { + ) -> BTreeMapEntryTransaction<'_, K, V, TXN> { BTreeMapEntryTransaction::new(self.tree_ref, key, Some(default_val)) .expect("default value is provided and should return `Some`") } /// Start a `BTreeMapEntryTransaction` that inserts the `val` into `key`. 
- pub fn new_entry_insert_txn(&mut self, key: K, val: V) -> BTreeMapEntryTransaction<'_, K, V> { + pub fn new_entry_insert_txn( + &mut self, + key: K, + val: V, + ) -> BTreeMapEntryTransaction<'_, K, V, TXN> { BTreeMapEntryTransaction::new_insert(self.tree_ref, key, val) } @@ -549,21 +563,23 @@ impl<'a, K: Ord + Debug, V: Clone> BTreeMapTransaction<'a, K, V> { } } -impl<'a, K: Ord + Debug, V: Transactional + Clone> ValTransaction - for BTreeMapTransaction<'a, K, V> +impl<'a, K: Ord + Debug, V: Transactional + Clone, TXN> ValTransaction + for BTreeMapTransaction<'a, K, V, TXN> { + type TXN = TXN; + fn commit(self) { self.commit_memory(); } - fn apply_to_txn(&self, txn: &mut Transaction) -> MetadataModelResult<()> { + async fn apply_to_txn(&self, txn: &mut Self::TXN) -> MetadataModelResult<()> { // Add the staging operation to txn for (k, op) in &self.staging { match op { - BTreeMapOp::Insert(v) => v.upsert_in_transaction(txn)?, + BTreeMapOp::Insert(v) => v.upsert_in_transaction(txn).await?, BTreeMapOp::Delete => { if let Some(v) = self.tree_ref.get(k) { - v.delete_in_transaction(txn)?; + v.delete_in_transaction(txn).await?; } } } @@ -573,24 +589,26 @@ impl<'a, K: Ord + Debug, V: Transactional + Clone> ValTransaction } /// Transaction wrapper for a `BTreeMap` entry value of given `key` -pub struct BTreeMapEntryTransaction<'a, K, V> { +pub struct BTreeMapEntryTransaction<'a, K, V, TXN> { tree_ref: &'a mut BTreeMap, pub key: K, pub new_value: V, + _phantom: PhantomData, } -impl<'a, K: Ord + Debug, V: Clone> BTreeMapEntryTransaction<'a, K, V> { +impl<'a, K: Ord + Debug, V: Clone, TXN> BTreeMapEntryTransaction<'a, K, V, TXN> { /// Create a `ValTransaction` that wraps a `BTreeMap` entry of the given `key`. 
/// If the tree does not contain `key`, the `default_val` will be used as the initial value pub fn new_insert( tree_ref: &'a mut BTreeMap, key: K, value: V, - ) -> BTreeMapEntryTransaction<'a, K, V> { + ) -> BTreeMapEntryTransaction<'a, K, V, TXN> { BTreeMapEntryTransaction { new_value: value, tree_ref, key, + _phantom: PhantomData, } } @@ -604,7 +622,7 @@ impl<'a, K: Ord + Debug, V: Clone> BTreeMapEntryTransaction<'a, K, V> { tree_ref: &'a mut BTreeMap, key: K, default_val: Option, - ) -> Option> { + ) -> Option> { tree_ref .get(&key) .cloned() @@ -613,11 +631,12 @@ impl<'a, K: Ord + Debug, V: Clone> BTreeMapEntryTransaction<'a, K, V> { new_value: orig_value, tree_ref, key, + _phantom: PhantomData, }) } } -impl<'a, K, V> Deref for BTreeMapEntryTransaction<'a, K, V> { +impl<'a, K, V, TXN> Deref for BTreeMapEntryTransaction<'a, K, V, TXN> { type Target = V; fn deref(&self) -> &Self::Target { @@ -625,24 +644,26 @@ impl<'a, K, V> Deref for BTreeMapEntryTransaction<'a, K, V> { } } -impl<'a, K, V> DerefMut for BTreeMapEntryTransaction<'a, K, V> { +impl<'a, K, V, TXN> DerefMut for BTreeMapEntryTransaction<'a, K, V, TXN> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.new_value } } -impl<'a, K: Ord, V: PartialEq + Transactional> ValTransaction - for BTreeMapEntryTransaction<'a, K, V> +impl<'a, K: Ord, V: PartialEq + Transactional, TXN> ValTransaction + for BTreeMapEntryTransaction<'a, K, V, TXN> { + type TXN = TXN; + fn commit(self) { self.tree_ref.insert(self.key, self.new_value); } - fn apply_to_txn(&self, txn: &mut Transaction) -> MetadataModelResult<()> { + async fn apply_to_txn(&self, txn: &mut Self::TXN) -> MetadataModelResult<()> { if !self.tree_ref.contains_key(&self.key) || *self.tree_ref.get(&self.key).unwrap() != self.new_value { - self.new_value.upsert_in_transaction(txn)? + self.new_value.upsert_in_transaction(txn).await? 
} Ok(()) } @@ -661,8 +682,9 @@ mod tests { const TEST_CF: &str = "test-cf"; - impl Transactional for TestTransactional { - fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { + #[async_trait] + impl Transactional for TestTransactional { + async fn upsert_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { trx.put( TEST_CF.to_string(), self.key.as_bytes().into(), @@ -671,14 +693,14 @@ mod tests { Ok(()) } - fn delete_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { + async fn delete_in_transaction(&self, trx: &mut Transaction) -> MetadataModelResult<()> { trx.delete(TEST_CF.to_string(), self.key.as_bytes().into()); Ok(()) } } - #[test] - fn test_simple_var_transaction_commit() { + #[tokio::test] + async fn test_simple_var_transaction_commit() { let mut kv = TestTransactional { key: "key", value: "original", @@ -687,7 +709,7 @@ mod tests { num_txn.value = "modified"; assert_eq!(num_txn.value, "modified"); let mut txn = Transaction::default(); - num_txn.apply_to_txn(&mut txn).unwrap(); + num_txn.apply_to_txn(&mut txn).await.unwrap(); let txn_op = txn.get_operations(); assert_eq!(1, txn_op.len()); assert!(matches!( @@ -717,8 +739,8 @@ mod tests { assert_eq!("original", kv.value); } - #[test] - fn test_tree_map_transaction_commit() { + #[tokio::test] + async fn test_tree_map_transaction_commit() { let mut map: BTreeMap = BTreeMap::new(); map.insert( "to-remove".to_string(), @@ -800,7 +822,7 @@ mod tests { ); let mut txn = Transaction::default(); - map_txn.apply_to_txn(&mut txn).unwrap(); + map_txn.apply_to_txn(&mut txn).await.unwrap(); let txn_ops = txn.get_operations(); assert_eq!(5, txn_ops.len()); for op in txn_ops { @@ -860,8 +882,8 @@ mod tests { assert_eq!(map_copy, map); } - #[test] - fn test_tree_map_entry_update_transaction_commit() { + #[tokio::test] + async fn test_tree_map_entry_update_transaction_commit() { let mut map: BTreeMap = BTreeMap::new(); map.insert( "first".to_string(), 
@@ -875,7 +897,7 @@ mod tests { let mut first_entry_txn = map_txn.new_entry_txn("first".to_string()).unwrap(); first_entry_txn.value = "first-value"; let mut txn = Transaction::default(); - first_entry_txn.apply_to_txn(&mut txn).unwrap(); + first_entry_txn.apply_to_txn(&mut txn).await.unwrap(); let txn_ops = txn.get_operations(); assert_eq!(1, txn_ops.len()); assert!( @@ -885,8 +907,8 @@ mod tests { assert_eq!("first-value", map.get("first").unwrap().value); } - #[test] - fn test_tree_map_entry_insert_transaction_commit() { + #[tokio::test] + async fn test_tree_map_entry_insert_transaction_commit() { let mut map: BTreeMap = BTreeMap::new(); let mut map_txn = BTreeMapTransaction::new(&mut map); @@ -898,7 +920,7 @@ mod tests { }, ); let mut txn = Transaction::default(); - first_entry_txn.apply_to_txn(&mut txn).unwrap(); + first_entry_txn.apply_to_txn(&mut txn).await.unwrap(); let txn_ops = txn.get_operations(); assert_eq!(1, txn_ops.len()); assert!( diff --git a/src/meta/src/model/stream.rs b/src/meta/src/model/stream.rs index 5dd8f53e249b0..726bd7fcd8e73 100644 --- a/src/meta/src/model/stream.rs +++ b/src/meta/src/model/stream.rs @@ -13,6 +13,7 @@ // limitations under the License. use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::ops::AddAssign; use itertools::Itertools; use risingwave_common::catalog::TableId; @@ -48,22 +49,22 @@ pub struct TableFragments { state: State, /// The table fragments. 
- pub(crate) fragments: BTreeMap, + pub fragments: BTreeMap, /// The status of actors - pub(crate) actor_status: BTreeMap, + pub actor_status: BTreeMap, /// The splits of actors - pub(crate) actor_splits: HashMap>, + pub actor_splits: HashMap>, /// The environment associated with this stream plan and its fragments - pub(crate) env: StreamEnvironment, + pub env: StreamEnvironment, } #[derive(Debug, Clone, Default)] pub struct StreamEnvironment { /// The timezone used to interpret timestamps and dates for conversion - pub(crate) timezone: Option, + pub timezone: Option, } impl StreamEnvironment { @@ -353,9 +354,12 @@ impl TableFragments { } /// Resolve dependent table - fn resolve_dependent_table(stream_node: &StreamNode, table_ids: &mut HashSet) { + fn resolve_dependent_table(stream_node: &StreamNode, table_ids: &mut HashMap) { if let Some(NodeBody::Chain(chain)) = stream_node.node_body.as_ref() { - table_ids.insert(TableId::new(chain.table_id)); + table_ids + .entry(TableId::new(chain.table_id)) + .or_default() + .add_assign(1); } for child in &stream_node.input { @@ -363,9 +367,10 @@ impl TableFragments { } } - /// Returns dependent table ids. - pub fn dependent_table_ids(&self) -> HashSet { - let mut table_ids = HashSet::new(); + /// Returns a mapping of dependent table ids of the `TableFragments` + /// to their corresponding count. 
+ pub fn dependent_table_ids(&self) -> HashMap { + let mut table_ids = HashMap::new(); self.fragments.values().for_each(|fragment| { let actor = &fragment.actors[0]; Self::resolve_dependent_table(actor.nodes.as_ref().unwrap(), &mut table_ids); diff --git a/src/meta/src/model_v2/README.md b/src/meta/src/model_v2/README.md new file mode 100644 index 0000000000000..25c22a4f566e1 --- /dev/null +++ b/src/meta/src/model_v2/README.md @@ -0,0 +1,50 @@ +# How to define changes between versions and generate migration and model files + +- Generate a new migration file and apply it to the database, check [migration](./migration/README.md) for more details. Let's take a local PG database as an example(`postgres://postgres:@localhost:5432/postgres`): + ```sh + export DATABASE_URL=postgres://postgres:@localhost:5432/postgres; + cargo run -- generate MIGRATION_NAME + cargo run -- up + ``` + - Define tables, indexes, foreign keys in the file. The new generated file will include a sample migration script, + you can replace it with your own migration scripts, like defining or changing tables, indexes, foreign keys and other + dml operation to do data correctness etc. Check [writing-migration](https://www.sea-ql.org/SeaORM/docs/migration/writing-migration/) + for more details. 
+ ```rust + #[async_trait::async_trait] + impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + // Replace the sample below with your own migration scripts + todo!(); + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + // Replace the sample below with your own migration scripts + todo!(); + } + } + ``` +- Apply migration, and generate model files for new tables and indexes from the database, so you don't need to write them manually, + ```sh + cargo run -- up + sea-orm-cli generate entity -u postgres://postgres:@localhost:5432/postgres -s public -o {target_dir} + cp {target_dir}/xxx.rs src/meta/src/model_v2/ + ``` +- Defines enum and array types in the model files, since they're basically only supported in PG, and we need to + define them in the model files manually. For example: + ```rust + // We define integer array typed fields as json and derive it using the follow one. + #[derive(Clone, Debug, PartialEq, FromJsonQueryResult, Eq, Serialize, Deserialize, Default)] + pub struct I32Array(pub Vec); + + // We define enum typed fields as string and derive it using the follow one. + #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] + #[sea_orm(rs_type = "String", db_type = "String(None)")] + pub enum WorkerStatus { + #[sea_orm(string_value = "STARTING")] + Starting, + #[sea_orm(string_value = "RUNNING")] + Running, + } + ``` +- Define other helper functions in the model files if necessary. \ No newline at end of file diff --git a/src/meta/src/model_v2/actor.rs b/src/meta/src/model_v2/actor.rs new file mode 100644 index 0000000000000..8fecb3046b1bc --- /dev/null +++ b/src/meta/src/model_v2/actor.rs @@ -0,0 +1,51 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::I32Array; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "actor")] +pub struct Model { + #[sea_orm(primary_key)] + pub actor_id: i32, + pub fragment_id: i32, + pub status: Option, + pub splits: Option, + pub parallel_unit_id: i32, + pub upstream_actor_ids: Option, + pub dispatchers: Option, + pub vnode_bitmap: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::fragment::Entity", + from = "Column::FragmentId", + to = "super::fragment::Column::FragmentId", + on_update = "NoAction", + on_delete = "Cascade" + )] + Fragment, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Fragment.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/cluster.rs b/src/meta/src/model_v2/cluster.rs new file mode 100644 index 0000000000000..36cdb449046bf --- /dev/null +++ b/src/meta/src/model_v2/cluster.rs @@ -0,0 +1,28 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "cluster")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub cluster_id: Uuid, + pub created_at: DateTime, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/compaction_config.rs b/src/meta/src/model_v2/compaction_config.rs new file mode 100644 index 0000000000000..6f8345734586e --- /dev/null +++ b/src/meta/src/model_v2/compaction_config.rs @@ -0,0 +1,29 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "compaction_config")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub compaction_group_id: i64, + #[sea_orm(column_type = "JsonBinary", nullable)] + pub config: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/compaction_status.rs b/src/meta/src/model_v2/compaction_status.rs new file mode 100644 index 0000000000000..5872463395066 --- /dev/null +++ b/src/meta/src/model_v2/compaction_status.rs @@ -0,0 +1,29 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "compaction_status")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub compaction_group_id: i64, + #[sea_orm(column_type = "JsonBinary", nullable)] + pub status: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/compaction_task.rs b/src/meta/src/model_v2/compaction_task.rs new file mode 100644 index 0000000000000..d3211b96d9a65 --- /dev/null +++ b/src/meta/src/model_v2/compaction_task.rs @@ -0,0 +1,30 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "compaction_task")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: i64, + #[sea_orm(column_type = "JsonBinary")] + pub task: Json, + pub context_id: i32, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/connection.rs b/src/meta/src/model_v2/connection.rs new file mode 100644 index 0000000000000..0096603c843a3 --- /dev/null +++ b/src/meta/src/model_v2/connection.rs @@ -0,0 +1,79 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_pb::catalog::connection::PbInfo; +use risingwave_pb::catalog::PbConnection; +use sea_orm::entity::prelude::*; +use sea_orm::ActiveValue; + +use crate::model_v2::{ConnectionId, PrivateLinkService}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "connection")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub connection_id: ConnectionId, + pub name: String, + pub info: PrivateLinkService, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::ConnectionId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, + #[sea_orm(has_many = "super::sink::Entity")] + Sink, + #[sea_orm(has_many = "super::source::Entity")] + Source, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Sink.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Source.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} + +impl From for ActiveModel { + fn from(conn: PbConnection) -> Self { + let Some(PbInfo::PrivateLinkService(private_link_srv)) = conn.info else { + unreachable!("private link not provided.") + }; + + Self { + connection_id: ActiveValue::Set(conn.id as _), + name: ActiveValue::Set(conn.name), + info: ActiveValue::Set(PrivateLinkService(private_link_srv)), + } + } +} diff --git a/src/meta/src/model_v2/database.rs b/src/meta/src/model_v2/database.rs new file mode 100644 index 0000000000000..909c12eceac5a --- /dev/null +++ b/src/meta/src/model_v2/database.rs @@ -0,0 +1,46 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::DatabaseId; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "database")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub database_id: DatabaseId, + #[sea_orm(unique)] + pub name: String, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::DatabaseId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/ext/hummock.rs b/src/meta/src/model_v2/ext/hummock.rs new file mode 100644 index 0000000000000..77111e2e7d202 --- /dev/null +++ b/src/meta/src/model_v2/ext/hummock.rs @@ -0,0 +1,61 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_pb::hummock::HummockPinnedVersion; +use sea_orm::sea_query::OnConflict; +use sea_orm::ActiveValue::{Set, Unchanged}; +use sea_orm::EntityTrait; + +use crate::model::{MetadataModelResult, Transactional}; +use crate::model_v2::hummock_pinned_version; +use crate::model_v2::trx::Transaction; + +#[async_trait::async_trait] +impl Transactional for HummockPinnedVersion { + async fn upsert_in_transaction( + &self, + trx: &mut crate::model_v2::trx::Transaction, + ) -> MetadataModelResult<()> { + // TODO: error type conversion + // TODO: integer type conversion + let m = hummock_pinned_version::ActiveModel { + context_id: Unchanged(self.context_id.try_into().unwrap()), + min_pinned_id: Set(self.min_pinned_id.try_into().unwrap()), + }; + hummock_pinned_version::Entity::insert(m) + .on_conflict( + OnConflict::column(hummock_pinned_version::Column::ContextId) + .update_columns([hummock_pinned_version::Column::MinPinnedId]) + .to_owned(), + ) + .exec(trx) + .await + .unwrap(); + Ok(()) + } + + async fn delete_in_transaction( + &self, + trx: &mut crate::model_v2::trx::Transaction, + ) -> MetadataModelResult<()> { + // TODO: error type conversion + // TODO: integer type conversion + let id: i32 = self.context_id.try_into().unwrap(); + hummock_pinned_version::Entity::delete_by_id(id) + .exec(trx) + .await + .unwrap(); + Ok(()) + } +} diff --git a/src/meta/src/model_v2/ext/mod.rs b/src/meta/src/model_v2/ext/mod.rs new file mode 100644 index 0000000000000..47a5ce8623dc4 --- /dev/null +++ b/src/meta/src/model_v2/ext/mod.rs @@ -0,0 +1,16 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod hummock; +pub use hummock::*; diff --git a/src/meta/src/model_v2/fragment.rs b/src/meta/src/model_v2/fragment.rs new file mode 100644 index 0000000000000..9263dd99eabb8 --- /dev/null +++ b/src/meta/src/model_v2/fragment.rs @@ -0,0 +1,62 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +use crate::model_v2::I32Array; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "fragment")] +pub struct Model { + #[sea_orm(primary_key)] + pub fragment_id: i32, + pub table_id: i32, + pub fragment_type_mask: i32, + pub distribution_type: String, + pub stream_node: Json, + pub vnode_mapping: Option, + pub state_table_ids: Option, + pub upstream_fragment_id: Option, + pub dispatcher_type: Option, + pub dist_key_indices: Option, + pub output_indices: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::actor::Entity")] + Actor, + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::TableId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Actor.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/function.rs b/src/meta/src/model_v2/function.rs new file mode 100644 index 0000000000000..663f8e2284fd7 --- /dev/null +++ b/src/meta/src/model_v2/function.rs @@ -0,0 +1,90 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_pb::catalog::function::Kind; +use risingwave_pb::catalog::PbFunction; +use sea_orm::entity::prelude::*; +use sea_orm::ActiveValue; + +use crate::model_v2::{DataType, DataTypeArray, FunctionId}; + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum FunctionKind { + #[sea_orm(string_value = "Scalar")] + Scalar, + #[sea_orm(string_value = "Table")] + Table, + #[sea_orm(string_value = "Aggregate")] + Aggregate, +} + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "function")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub function_id: FunctionId, + pub name: String, + pub arg_types: DataTypeArray, + pub return_type: DataType, + pub language: String, + pub link: String, + pub identifier: String, + pub kind: FunctionKind, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::FunctionId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} + +impl From for FunctionKind { + fn from(kind: Kind) -> Self { + match kind { + Kind::Scalar(_) => Self::Scalar, + Kind::Table(_) => Self::Table, + Kind::Aggregate(_) => Self::Aggregate, + } + } +} + +impl From for ActiveModel { + fn from(function: PbFunction) -> Self { + Self { + function_id: ActiveValue::Set(function.id as _), + name: ActiveValue::Set(function.name), + arg_types: ActiveValue::Set(DataTypeArray(function.arg_types)), + return_type: ActiveValue::Set(DataType(function.return_type.unwrap())), + language: ActiveValue::Set(function.language), + link: ActiveValue::Set(function.link), + identifier: ActiveValue::Set(function.identifier), + kind: 
ActiveValue::Set(function.kind.unwrap().into()), + } + } +} diff --git a/src/meta/src/model_v2/hummock_pinned_snapshot.rs b/src/meta/src/model_v2/hummock_pinned_snapshot.rs new file mode 100644 index 0000000000000..170f35dd5d358 --- /dev/null +++ b/src/meta/src/model_v2/hummock_pinned_snapshot.rs @@ -0,0 +1,28 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "hummock_pinned_snapshot")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub context_id: i32, + pub min_pinned_snapshot: i64, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/hummock_pinned_version.rs b/src/meta/src/model_v2/hummock_pinned_version.rs new file mode 100644 index 0000000000000..6e2f34a5f735e --- /dev/null +++ b/src/meta/src/model_v2/hummock_pinned_version.rs @@ -0,0 +1,28 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "hummock_pinned_version")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub context_id: i32, + pub min_pinned_id: i64, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/hummock_version_delta.rs b/src/meta/src/model_v2/hummock_version_delta.rs new file mode 100644 index 0000000000000..100dd82eafe94 --- /dev/null +++ b/src/meta/src/model_v2/hummock_version_delta.rs @@ -0,0 +1,35 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "hummock_version_delta")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: i64, + pub prev_id: i64, + #[sea_orm(column_type = "JsonBinary", nullable)] + pub group_deltas: Option, + pub max_committed_epoch: i64, + pub safe_epoch: i64, + pub trivial_move: bool, + #[sea_orm(column_type = "JsonBinary", nullable)] + pub gc_object_ids: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/hummock_version_stats.rs b/src/meta/src/model_v2/hummock_version_stats.rs new file mode 100644 index 0000000000000..1a7e990df405a --- /dev/null +++ b/src/meta/src/model_v2/hummock_version_stats.rs @@ -0,0 +1,29 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "hummock_version_stats")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: i64, + #[sea_orm(column_type = "JsonBinary")] + pub stats: Json, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/index.rs b/src/meta/src/model_v2/index.rs new file mode 100644 index 0000000000000..3b80632e2cfc3 --- /dev/null +++ b/src/meta/src/model_v2/index.rs @@ -0,0 +1,66 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ExprNodeArray, I32Array, IndexId, JobStatus, TableId}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "index")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub index_id: IndexId, + pub name: String, + pub index_table_id: TableId, + pub primary_table_id: TableId, + pub index_items: ExprNodeArray, + pub original_columns: I32Array, + pub job_status: JobStatus, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::IndexId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, + #[sea_orm( + belongs_to = "super::table::Entity", + from = "Column::IndexTableId", + to = "super::table::Column::TableId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Table2, + #[sea_orm( + belongs_to = "super::table::Entity", + from = "Column::PrimaryTableId", + to = "super::table::Column::TableId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Table1, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/migration/Cargo.toml b/src/meta/src/model_v2/migration/Cargo.toml new file mode 100644 index 0000000000000..d5d51d77da909 --- /dev/null +++ b/src/meta/src/model_v2/migration/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "model_migration" +version = "0.1.0" +edition = "2021" +publish = false + +[lib] +name = "model_migration" +path = "src/lib.rs" + +[dependencies] +async-std = { version = "1", features = ["attributes", "tokio1"] } +uuid = { version = "1", features = ["v4"] } + +[dependencies.sea-orm-migration] +version = "0.12.0" +features = ["sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", "runtime-tokio-native-tls", "with-uuid"] diff --git 
a/src/meta/src/model_v2/migration/README.md b/src/meta/src/model_v2/migration/README.md new file mode 100644 index 0000000000000..3b438d89e31c5 --- /dev/null +++ b/src/meta/src/model_v2/migration/README.md @@ -0,0 +1,41 @@ +# Running Migrator CLI + +- Generate a new migration file + ```sh + cargo run -- generate MIGRATION_NAME + ``` +- Apply all pending migrations + ```sh + cargo run + ``` + ```sh + cargo run -- up + ``` +- Apply first 10 pending migrations + ```sh + cargo run -- up -n 10 + ``` +- Rollback last applied migrations + ```sh + cargo run -- down + ``` +- Rollback last 10 applied migrations + ```sh + cargo run -- down -n 10 + ``` +- Drop all tables from the database, then reapply all migrations + ```sh + cargo run -- fresh + ``` +- Rollback all applied migrations, then reapply all migrations + ```sh + cargo run -- refresh + ``` +- Rollback all applied migrations + ```sh + cargo run -- reset + ``` +- Check the status of all migrations + ```sh + cargo run -- status + ``` diff --git a/src/meta/src/model_v2/migration/src/lib.rs b/src/meta/src/model_v2/migration/src/lib.rs new file mode 100644 index 0000000000000..570bc75d08e99 --- /dev/null +++ b/src/meta/src/model_v2/migration/src/lib.rs @@ -0,0 +1,16 @@ +pub use sea_orm_migration::prelude::*; + +mod m20230908_072257_init; +mod m20231008_020431_hummock; + +pub struct Migrator; + +#[async_trait::async_trait] +impl MigratorTrait for Migrator { + fn migrations() -> Vec> { + vec![ + Box::new(m20230908_072257_init::Migration), + Box::new(m20231008_020431_hummock::Migration), + ] + } +} diff --git a/src/meta/src/model_v2/migration/src/m20230908_072257_init.rs b/src/meta/src/model_v2/migration/src/m20230908_072257_init.rs new file mode 100644 index 0000000000000..c9559bd6feda2 --- /dev/null +++ b/src/meta/src/model_v2/migration/src/m20230908_072257_init.rs @@ -0,0 +1,999 @@ +use sea_orm_migration::prelude::{Index as MigrationIndex, Table as MigrationTable, *}; + +#[derive(DeriveMigrationName)] +pub struct 
Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + // 1. check if the table exists. + assert!(!manager.has_table(Cluster::Table.to_string()).await?); + assert!(!manager.has_table(Worker::Table.to_string()).await?); + assert!(!manager.has_table(WorkerProperty::Table.to_string()).await?); + assert!(!manager.has_table(User::Table.to_string()).await?); + assert!(!manager.has_table(UserPrivilege::Table.to_string()).await?); + assert!(!manager.has_table(Database::Table.to_string()).await?); + assert!(!manager.has_table(Schema::Table.to_string()).await?); + assert!(!manager.has_table(Fragment::Table.to_string()).await?); + assert!(!manager.has_table(Actor::Table.to_string()).await?); + assert!(!manager.has_table(Table::Table.to_string()).await?); + assert!(!manager.has_table(Source::Table.to_string()).await?); + assert!(!manager.has_table(Sink::Table.to_string()).await?); + assert!(!manager.has_table(Connection::Table.to_string()).await?); + assert!(!manager.has_table(View::Table.to_string()).await?); + assert!(!manager.has_table(Index::Table.to_string()).await?); + assert!(!manager.has_table(Function::Table.to_string()).await?); + assert!(!manager.has_table(Object::Table.to_string()).await?); + assert!( + !manager + .has_table(ObjectDependency::Table.to_string()) + .await? + ); + assert!( + !manager + .has_table(SystemParameter::Table.to_string()) + .await? + ); + + // 2. create tables. 
+ manager + .create_table( + MigrationTable::create() + .table(Cluster::Table) + .col( + ColumnDef::new(Cluster::ClusterId) + .uuid() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(Cluster::CreatedAt) + .timestamp() + .default(Expr::current_timestamp()) + .not_null(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Worker::Table) + .col( + ColumnDef::new(Worker::WorkerId) + .integer() + .not_null() + .auto_increment() + .primary_key(), + ) + .col(ColumnDef::new(Worker::WorkerType).string().not_null()) + .col(ColumnDef::new(Worker::Host).string().not_null()) + .col(ColumnDef::new(Worker::Port).integer().not_null()) + .col(ColumnDef::new(Worker::Status).string().not_null()) + .col(ColumnDef::new(Worker::TransactionId).integer()) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(WorkerProperty::Table) + .col( + ColumnDef::new(WorkerProperty::WorkerId) + .integer() + .primary_key(), + ) + .col( + ColumnDef::new(WorkerProperty::ParallelUnitIds) + .json() + .not_null(), + ) + .col( + ColumnDef::new(WorkerProperty::IsStreaming) + .boolean() + .not_null(), + ) + .col( + ColumnDef::new(WorkerProperty::IsServing) + .boolean() + .not_null(), + ) + .col( + ColumnDef::new(WorkerProperty::IsUnschedulable) + .boolean() + .not_null(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_worker_property_worker_id") + .from(WorkerProperty::Table, WorkerProperty::WorkerId) + .to(Worker::Table, Worker::WorkerId) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(User::Table) + .col( + ColumnDef::new(User::UserId) + .integer() + .primary_key() + .auto_increment(), + ) + .col(ColumnDef::new(User::Name).string().not_null()) + .col(ColumnDef::new(User::IsSuper).boolean().not_null()) + .col(ColumnDef::new(User::CanCreateDb).boolean().not_null()) + 
.col(ColumnDef::new(User::CanCreateUser).boolean().not_null()) + .col(ColumnDef::new(User::CanLogin).boolean().not_null()) + .col(ColumnDef::new(User::AuthType).string()) + .col(ColumnDef::new(User::AuthValue).string()) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Object::Table) + .col( + ColumnDef::new(Object::Oid) + .integer() + .auto_increment() + .primary_key(), + ) + .col(ColumnDef::new(Object::ObjType).string().not_null()) + .col(ColumnDef::new(Object::OwnerId).integer().not_null()) + .col(ColumnDef::new(Object::SchemaId).integer()) + .col(ColumnDef::new(Object::DatabaseId).integer()) + .col( + ColumnDef::new(Object::InitializedAt) + .timestamp() + .default(Expr::current_timestamp()) + .not_null(), + ) + .col( + ColumnDef::new(Object::CreatedAt) + .timestamp() + .default(Expr::current_timestamp()) + .not_null(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_object_owner_id") + .from(Object::Table, Object::OwnerId) + .to(User::Table, User::UserId) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_object_database_id") + .from(Object::Table, Object::DatabaseId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_object_schema_id") + .from(Object::Table, Object::SchemaId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(UserPrivilege::Table) + .col( + ColumnDef::new(UserPrivilege::Id) + .integer() + .primary_key() + .auto_increment(), + ) + .col(ColumnDef::new(UserPrivilege::UserId).integer().not_null()) + .col(ColumnDef::new(UserPrivilege::Oid).integer().not_null()) + .col( + ColumnDef::new(UserPrivilege::GrantedBy) + .integer() + .not_null(), + ) + 
.col(ColumnDef::new(UserPrivilege::Actions).string().not_null()) + .col( + ColumnDef::new(UserPrivilege::WithGrantOption) + .boolean() + .not_null(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_user_privilege_user_id") + .from(UserPrivilege::Table, UserPrivilege::UserId) + .to(User::Table, User::UserId) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_user_privilege_granted_by") + .from(UserPrivilege::Table, UserPrivilege::GrantedBy) + .to(User::Table, User::UserId) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_user_privilege_oid") + .from(UserPrivilege::Table, UserPrivilege::Oid) + .to(Object::Table, Object::Oid) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(ObjectDependency::Table) + .col( + ColumnDef::new(ObjectDependency::Id) + .integer() + .auto_increment() + .primary_key(), + ) + .col(ColumnDef::new(ObjectDependency::Oid).integer().not_null()) + .col( + ColumnDef::new(ObjectDependency::UsedBy) + .integer() + .not_null(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_object_dependency_oid") + .from(ObjectDependency::Table, ObjectDependency::Oid) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_object_dependency_used_by") + .from(ObjectDependency::Table, ObjectDependency::UsedBy) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Database::Table) + .col(ColumnDef::new(Database::DatabaseId).integer().primary_key()) + .col( + ColumnDef::new(Database::Name) + .string() + .unique_key() + .not_null(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_database_object_id") + .from(Database::Table, Database::DatabaseId) + .to(Object::Table, 
Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Schema::Table) + .col(ColumnDef::new(Schema::SchemaId).integer().primary_key()) + .col(ColumnDef::new(Schema::Name).string().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_schema_object_id") + .from(Schema::Table, Schema::SchemaId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Fragment::Table) + .col( + ColumnDef::new(Fragment::FragmentId) + .integer() + .primary_key() + .auto_increment(), + ) + .col(ColumnDef::new(Fragment::TableId).integer().not_null()) + .col( + ColumnDef::new(Fragment::FragmentTypeMask) + .integer() + .not_null(), + ) + .col( + ColumnDef::new(Fragment::DistributionType) + .string() + .not_null(), + ) + .col(ColumnDef::new(Fragment::StreamNode).json().not_null()) + .col(ColumnDef::new(Fragment::VnodeMapping).json()) + .col(ColumnDef::new(Fragment::StateTableIds).json()) + .col(ColumnDef::new(Fragment::UpstreamFragmentId).json()) + .col(ColumnDef::new(Fragment::DispatcherType).string()) + .col(ColumnDef::new(Fragment::DistKeyIndices).json()) + .col(ColumnDef::new(Fragment::OutputIndices).json()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_fragment_table_id") + .from(Fragment::Table, Fragment::TableId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Actor::Table) + .col( + ColumnDef::new(Actor::ActorId) + .integer() + .primary_key() + .auto_increment(), + ) + .col(ColumnDef::new(Actor::FragmentId).integer().not_null()) + .col(ColumnDef::new(Actor::Status).string()) + .col(ColumnDef::new(Actor::Splits).json()) + .col(ColumnDef::new(Actor::ParallelUnitId).integer().not_null()) + 
.col(ColumnDef::new(Actor::UpstreamActorIds).json()) + .col(ColumnDef::new(Actor::Dispatchers).json()) + .col(ColumnDef::new(Actor::VnodeBitmap).string()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_actor_fragment_id") + .from(Actor::Table, Actor::FragmentId) + .to(Fragment::Table, Fragment::FragmentId) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Connection::Table) + .col( + ColumnDef::new(Connection::ConnectionId) + .integer() + .primary_key(), + ) + .col(ColumnDef::new(Connection::Name).string().not_null()) + .col(ColumnDef::new(Connection::Info).json().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_connection_object_id") + .from(Connection::Table, Connection::ConnectionId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Source::Table) + .col(ColumnDef::new(Source::SourceId).integer().primary_key()) + .col(ColumnDef::new(Source::Name).string().not_null()) + .col(ColumnDef::new(Source::RowIdIndex).integer()) + .col(ColumnDef::new(Source::Columns).json().not_null()) + .col(ColumnDef::new(Source::PkColumnIds).json().not_null()) + .col(ColumnDef::new(Source::Properties).json().not_null()) + .col(ColumnDef::new(Source::Definition).string().not_null()) + .col(ColumnDef::new(Source::SourceInfo).json()) + .col(ColumnDef::new(Source::WatermarkDescs).json().not_null()) + .col(ColumnDef::new(Source::OptionalAssociatedTableId).integer()) + .col(ColumnDef::new(Source::ConnectionId).integer()) + .col(ColumnDef::new(Source::Version).big_integer().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_source_object_id") + .from(Source::Table, Source::SourceId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut 
ForeignKey::create() + .name("FK_source_connection_id") + .from(Source::Table, Source::ConnectionId) + .to(Connection::Table, Connection::ConnectionId) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Table::Table) + .col(ColumnDef::new(Table::TableId).integer().primary_key()) + .col(ColumnDef::new(Table::Name).string().not_null()) + .col(ColumnDef::new(Table::OptionalAssociatedSourceId).integer()) + .col(ColumnDef::new(Table::TableType).string().not_null()) + .col(ColumnDef::new(Table::Columns).json().not_null()) + .col(ColumnDef::new(Table::Pk).json().not_null()) + .col(ColumnDef::new(Table::DistributionKey).json().not_null()) + .col(ColumnDef::new(Table::StreamKey).json().not_null()) + .col(ColumnDef::new(Table::AppendOnly).boolean().not_null()) + .col(ColumnDef::new(Table::Properties).json().not_null()) + .col(ColumnDef::new(Table::FragmentId).integer().not_null()) + .col(ColumnDef::new(Table::VnodeColIndex).integer()) + .col(ColumnDef::new(Table::RowIdIndex).integer()) + .col(ColumnDef::new(Table::ValueIndices).json().not_null()) + .col(ColumnDef::new(Table::Definition).string().not_null()) + .col( + ColumnDef::new(Table::HandlePkConflictBehavior) + .string() + .not_null(), + ) + .col( + ColumnDef::new(Table::ReadPrefixLenHint) + .integer() + .not_null(), + ) + .col(ColumnDef::new(Table::WatermarkIndices).json().not_null()) + .col(ColumnDef::new(Table::DistKeyInPk).json().not_null()) + .col(ColumnDef::new(Table::DmlFragmentId).integer()) + .col(ColumnDef::new(Table::Cardinality).json()) + .col( + ColumnDef::new(Table::CleanedByWatermark) + .boolean() + .not_null(), + ) + .col(ColumnDef::new(Table::JobStatus).string().not_null()) + .col(ColumnDef::new(Table::CreateType).string().not_null()) + .col(ColumnDef::new(Table::Version).json().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_table_object_id") + .from(Table::Table, Table::TableId) + .to(Object::Table, Object::Oid) + 
.on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_table_fragment_id") + .from(Table::Table, Table::FragmentId) + .to(Fragment::Table, Fragment::FragmentId) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_table_dml_fragment_id") + .from(Table::Table, Table::DmlFragmentId) + .to(Fragment::Table, Fragment::FragmentId) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_table_optional_associated_source_id") + .from(Table::Table, Table::OptionalAssociatedSourceId) + .to(Source::Table, Source::SourceId) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Sink::Table) + .col(ColumnDef::new(Sink::SinkId).integer().primary_key()) + .col(ColumnDef::new(Sink::Name).string().not_null()) + .col(ColumnDef::new(Sink::Columns).json().not_null()) + .col(ColumnDef::new(Sink::PlanPk).json().not_null()) + .col(ColumnDef::new(Sink::DistributionKey).json().not_null()) + .col(ColumnDef::new(Sink::DownstreamPk).json().not_null()) + .col(ColumnDef::new(Sink::SinkType).string().not_null()) + .col(ColumnDef::new(Sink::Properties).json().not_null()) + .col(ColumnDef::new(Sink::Definition).string().not_null()) + .col(ColumnDef::new(Sink::ConnectionId).integer()) + .col(ColumnDef::new(Sink::DbName).string().not_null()) + .col(ColumnDef::new(Sink::SinkFromName).string().not_null()) + .col(ColumnDef::new(Sink::SinkFormatDesc).json()) + .col(ColumnDef::new(Sink::JobStatus).string().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_sink_object_id") + .from(Sink::Table, Sink::SinkId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_sink_connection_id") + .from(Sink::Table, Sink::ConnectionId) + .to(Connection::Table, Connection::ConnectionId) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + 
.create_table( + MigrationTable::create() + .table(View::Table) + .col(ColumnDef::new(View::ViewId).integer().primary_key()) + .col(ColumnDef::new(View::Name).string().not_null()) + .col(ColumnDef::new(View::Properties).json().not_null()) + .col(ColumnDef::new(View::Definition).string().not_null()) + .col(ColumnDef::new(View::Columns).json().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_view_object_id") + .from(View::Table, View::ViewId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Index::Table) + .col(ColumnDef::new(Index::IndexId).integer().primary_key()) + .col(ColumnDef::new(Index::Name).string().not_null()) + .col(ColumnDef::new(Index::IndexTableId).integer().not_null()) + .col(ColumnDef::new(Index::PrimaryTableId).integer().not_null()) + .col(ColumnDef::new(Index::IndexItems).json().not_null()) + .col(ColumnDef::new(Index::OriginalColumns).json().not_null()) + .col(ColumnDef::new(Index::JobStatus).string().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_index_object_id") + .from(Index::Table, Index::IndexId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_index_index_table_id") + .from(Index::Table, Index::IndexTableId) + .to(Table::Table, Table::TableId) + .to_owned(), + ) + .foreign_key( + &mut ForeignKey::create() + .name("FK_index_primary_table_id") + .from(Index::Table, Index::PrimaryTableId) + .to(Table::Table, Table::TableId) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(Function::Table) + .col(ColumnDef::new(Function::FunctionId).integer().primary_key()) + .col(ColumnDef::new(Function::Name).string().not_null()) + .col(ColumnDef::new(Function::ArgTypes).json().not_null()) + 
.col(ColumnDef::new(Function::ReturnType).json().not_null()) + .col(ColumnDef::new(Function::Language).string().not_null()) + .col(ColumnDef::new(Function::Link).string().not_null()) + .col(ColumnDef::new(Function::Identifier).string().not_null()) + .col(ColumnDef::new(Function::Kind).string().not_null()) + .foreign_key( + &mut ForeignKey::create() + .name("FK_function_object_id") + .from(Function::Table, Function::FunctionId) + .to(Object::Table, Object::Oid) + .on_delete(ForeignKeyAction::Cascade) + .to_owned(), + ) + .to_owned(), + ) + .await?; + manager + .create_table( + MigrationTable::create() + .table(SystemParameter::Table) + .col( + ColumnDef::new(SystemParameter::Name) + .string() + .primary_key() + .not_null(), + ) + .col(ColumnDef::new(SystemParameter::Value).string().not_null()) + .col( + ColumnDef::new(SystemParameter::IsMutable) + .boolean() + .not_null(), + ) + .col(ColumnDef::new(SystemParameter::Description).string()) + .to_owned(), + ) + .await?; + + // 3. create indexes. + manager + .create_index( + MigrationIndex::create() + .table(Worker::Table) + .name("idx_worker_host_port") + .unique() + .col(Worker::Host) + .col(Worker::Port) + .to_owned(), + ) + .await?; + + // 4. initialize data. + let insert_cluster_id = Query::insert() + .into_table(Cluster::Table) + .columns([Cluster::ClusterId]) + .values_panic([uuid::Uuid::new_v4().into()]) + .to_owned(); + let insert_sys_users = Query::insert() + .into_table(User::Table) + .columns([ + User::Name, + User::IsSuper, + User::CanCreateUser, + User::CanCreateDb, + User::CanLogin, + ]) + .values_panic([ + "root".into(), + true.into(), + true.into(), + true.into(), + true.into(), + ]) + .values_panic([ + "postgres".into(), + true.into(), + true.into(), + true.into(), + true.into(), + ]) + .to_owned(); + + // Since User table is newly created, we assume that the initial user id of `root` is 1 and `postgres` is 2. 
+ let insert_objects = Query::insert() + .into_table(Object::Table) + .columns([Object::ObjType, Object::OwnerId, Object::DatabaseId]) + .values_panic(["DATABASE".into(), 1.into(), None::.into()]) + .values_panic(["SCHEMA".into(), 1.into(), 1.into()]) // public + .values_panic(["SCHEMA".into(), 1.into(), 1.into()]) // pg_catalog + .values_panic(["SCHEMA".into(), 1.into(), 1.into()]) // information_schema + .values_panic(["SCHEMA".into(), 1.into(), 1.into()]) // rw_catalog + .to_owned(); + + // Since all tables are newly created, we assume that the initial object id of `dev` is 1 and the schemas' ids are 2, 3, 4, 5. + let insert_sys_database = Query::insert() + .into_table(Database::Table) + .columns([Database::DatabaseId, Database::Name]) + .values_panic([1.into(), "dev".into()]) + .to_owned(); + let insert_sys_schemas = Query::insert() + .into_table(Schema::Table) + .columns([Schema::SchemaId, Schema::Name]) + .values_panic([2.into(), "public".into()]) + .values_panic([3.into(), "pg_catalog".into()]) + .values_panic([4.into(), "information_schema".into()]) + .values_panic([5.into(), "rw_catalog".into()]) + .to_owned(); + + manager.exec_stmt(insert_cluster_id).await?; + manager.exec_stmt(insert_sys_users).await?; + manager.exec_stmt(insert_objects).await?; + manager.exec_stmt(insert_sys_database).await?; + manager.exec_stmt(insert_sys_schemas).await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + macro_rules! drop_tables { + ($manager:expr, $( $table:ident ),+) => { + $( + $manager + .drop_table( + MigrationTable::drop() + .table($table::Table) + .if_exists() + .cascade() + .to_owned(), + ) + .await?; + )+ + }; + } + + // drop tables cascade. 
+ drop_tables!( + manager, + Cluster, + Worker, + WorkerProperty, + User, + UserPrivilege, + Database, + Schema, + Fragment, + Actor, + Table, + Source, + Sink, + Connection, + View, + Index, + Function, + Object, + ObjectDependency, + SystemParameter + ); + Ok(()) + } +} + +#[derive(DeriveIden)] +enum Cluster { + Table, + ClusterId, + CreatedAt, +} + +#[derive(DeriveIden)] +enum Worker { + Table, + WorkerId, + WorkerType, + Host, + Port, + TransactionId, + Status, +} + +#[derive(DeriveIden)] +enum WorkerProperty { + Table, + WorkerId, + ParallelUnitIds, + IsStreaming, + IsServing, + IsUnschedulable, +} + +#[derive(DeriveIden)] +enum User { + Table, + UserId, + Name, + IsSuper, + CanCreateDb, + CanCreateUser, + CanLogin, + AuthType, + AuthValue, +} + +#[derive(DeriveIden)] +enum UserPrivilege { + Table, + Id, + UserId, + Oid, + GrantedBy, + Actions, + WithGrantOption, +} + +#[derive(DeriveIden)] +enum Database { + Table, + DatabaseId, + Name, +} + +#[derive(DeriveIden)] +enum Schema { + Table, + SchemaId, + Name, +} + +#[derive(DeriveIden)] +enum Fragment { + Table, + FragmentId, + TableId, + FragmentTypeMask, + DistributionType, + StreamNode, + VnodeMapping, + StateTableIds, + UpstreamFragmentId, + DispatcherType, + DistKeyIndices, + OutputIndices, +} + +#[derive(DeriveIden)] +enum Actor { + Table, + ActorId, + FragmentId, + Status, + Splits, + ParallelUnitId, + UpstreamActorIds, + Dispatchers, + VnodeBitmap, +} + +#[derive(DeriveIden)] +#[allow(clippy::enum_variant_names)] +enum Table { + Table, + TableId, + Name, + OptionalAssociatedSourceId, + TableType, + Columns, + Pk, + DistributionKey, + StreamKey, + AppendOnly, + Properties, + FragmentId, + VnodeColIndex, + RowIdIndex, + ValueIndices, + Definition, + HandlePkConflictBehavior, + ReadPrefixLenHint, + WatermarkIndices, + DistKeyInPk, + DmlFragmentId, + Cardinality, + CleanedByWatermark, + JobStatus, + CreateType, + Version, +} + +#[derive(DeriveIden)] +enum Source { + Table, + SourceId, + Name, + RowIdIndex, 
+ Columns, + PkColumnIds, + Properties, + Definition, + SourceInfo, + WatermarkDescs, + OptionalAssociatedTableId, + ConnectionId, + Version, +} + +#[derive(DeriveIden)] +enum Sink { + Table, + SinkId, + Name, + Columns, + PlanPk, + DistributionKey, + DownstreamPk, + SinkType, + Properties, + Definition, + ConnectionId, + DbName, + SinkFromName, + SinkFormatDesc, + JobStatus, +} + +#[derive(DeriveIden)] +enum Connection { + Table, + ConnectionId, + Name, + Info, +} + +#[derive(DeriveIden)] +enum View { + Table, + ViewId, + Name, + Properties, + Definition, + Columns, +} + +#[derive(DeriveIden)] +enum Index { + Table, + IndexId, + Name, + IndexTableId, + PrimaryTableId, + IndexItems, + OriginalColumns, + JobStatus, +} + +#[derive(DeriveIden)] +enum Function { + Table, + FunctionId, + Name, + ArgTypes, + ReturnType, + Language, + Link, + Identifier, + Kind, +} + +#[derive(DeriveIden)] +enum Object { + Table, + Oid, + ObjType, + OwnerId, + SchemaId, + DatabaseId, + InitializedAt, + CreatedAt, +} + +#[derive(DeriveIden)] +enum ObjectDependency { + Table, + Id, + Oid, + UsedBy, +} + +#[derive(DeriveIden)] +enum SystemParameter { + Table, + Name, + Value, + IsMutable, + Description, +} diff --git a/src/meta/src/model_v2/migration/src/m20231008_020431_hummock.rs b/src/meta/src/model_v2/migration/src/m20231008_020431_hummock.rs new file mode 100644 index 0000000000000..ab01980990f34 --- /dev/null +++ b/src/meta/src/model_v2/migration/src/m20231008_020431_hummock.rs @@ -0,0 +1,264 @@ +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + macro_rules! assert_not_has_tables { + ($manager:expr, $( $table:ident ),+) => { + $( + assert!( + !$manager + .has_table($table::Table.to_string()) + .await? 
+ ); + )+ + }; + } + assert_not_has_tables!( + manager, + CompactionTask, + CompactionConfig, + CompactionStatus, + HummockPinnedVersion, + HummockPinnedSnapshot, + HummockVersionDelta, + HummockVersionStats + ); + + manager + .create_table( + Table::create() + .table(CompactionTask::Table) + .col( + ColumnDef::new(CompactionTask::Id) + .big_integer() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(CompactionTask::Task) + .json_binary() + .not_null(), + ) + .col( + ColumnDef::new(CompactionTask::ContextId) + .integer() + .not_null(), + ) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(CompactionConfig::Table) + .col( + ColumnDef::new(CompactionConfig::CompactionGroupId) + .big_integer() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(CompactionConfig::Config).json_binary()) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(CompactionStatus::Table) + .col( + ColumnDef::new(CompactionStatus::CompactionGroupId) + .big_integer() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(CompactionStatus::Status).json_binary()) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(HummockPinnedVersion::Table) + .col( + ColumnDef::new(HummockPinnedVersion::ContextId) + .integer() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(HummockPinnedVersion::MinPinnedId) + .big_integer() + .not_null(), + ) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(HummockPinnedSnapshot::Table) + .col( + ColumnDef::new(HummockPinnedSnapshot::ContextId) + .integer() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(HummockPinnedSnapshot::MinPinnedSnapshot) + .big_integer() + .not_null(), + ) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(HummockVersionDelta::Table) + .col( + ColumnDef::new(HummockVersionDelta::Id) + .big_integer() + .not_null() + .primary_key(), + ) + 
.col( + ColumnDef::new(HummockVersionDelta::PrevId) + .big_integer() + .not_null(), + ) + .col(ColumnDef::new(HummockVersionDelta::GroupDeltas).json_binary()) + .col( + ColumnDef::new(HummockVersionDelta::MaxCommittedEpoch) + .big_integer() + .not_null(), + ) + .col( + ColumnDef::new(HummockVersionDelta::SafeEpoch) + .big_integer() + .not_null(), + ) + .col( + ColumnDef::new(HummockVersionDelta::TrivialMove) + .boolean() + .not_null(), + ) + .col(ColumnDef::new(HummockVersionDelta::GcObjectIds).json_binary()) + .to_owned(), + ) + .await?; + + manager + .create_table( + Table::create() + .table(HummockVersionStats::Table) + .col( + ColumnDef::new(HummockVersionStats::Id) + .big_integer() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(HummockVersionStats::Stats) + .json_binary() + .not_null(), + ) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + macro_rules! drop_tables { + ($manager:expr, $( $table:ident ),+) => { + $( + $manager + .drop_table( + Table::drop() + .table($table::Table) + .if_exists() + .cascade() + .to_owned(), + ) + .await?; + )+ + }; + } + drop_tables!( + manager, + CompactionTask, + CompactionConfig, + CompactionStatus, + HummockPinnedVersion, + HummockPinnedSnapshot, + HummockVersionDelta, + HummockVersionStats + ); + Ok(()) + } +} + +#[derive(DeriveIden)] +enum CompactionTask { + Table, + Id, + Task, + ContextId, +} + +#[derive(DeriveIden)] +enum CompactionConfig { + Table, + CompactionGroupId, + Config, +} + +#[derive(DeriveIden)] +enum CompactionStatus { + Table, + CompactionGroupId, + Status, +} + +#[derive(DeriveIden)] +enum HummockPinnedVersion { + Table, + ContextId, + MinPinnedId, +} + +#[derive(DeriveIden)] +enum HummockPinnedSnapshot { + Table, + ContextId, + MinPinnedSnapshot, +} + +#[derive(DeriveIden)] +enum HummockVersionDelta { + Table, + Id, + PrevId, + GroupDeltas, + MaxCommittedEpoch, + SafeEpoch, + TrivialMove, + GcObjectIds, +} + 
+#[derive(DeriveIden)] +enum HummockVersionStats { + Table, + Id, + Stats, +} diff --git a/src/meta/src/model_v2/migration/src/main.rs b/src/meta/src/model_v2/migration/src/main.rs new file mode 100644 index 0000000000000..9354e45ecd198 --- /dev/null +++ b/src/meta/src/model_v2/migration/src/main.rs @@ -0,0 +1,6 @@ +use sea_orm_migration::prelude::*; + +#[async_std::main] +async fn main() { + cli::run_cli(model_migration::Migrator).await; +} diff --git a/src/meta/src/model_v2/mod.rs b/src/meta/src/model_v2/mod.rs new file mode 100644 index 0000000000000..1c2f928063fff --- /dev/null +++ b/src/meta/src/model_v2/mod.rs @@ -0,0 +1,136 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashMap; + +use risingwave_pb::catalog::{PbCreateType, PbStreamJobStatus}; +use sea_orm::{DeriveActiveEnum, EnumIter, FromJsonQueryResult}; +use serde::{Deserialize, Serialize}; + +pub mod prelude; + +pub mod actor; +pub mod cluster; +pub mod compaction_config; +pub mod compaction_status; +pub mod compaction_task; +pub mod connection; +pub mod database; +pub mod ext; +pub mod fragment; +pub mod function; +pub mod hummock_pinned_snapshot; +pub mod hummock_pinned_version; +pub mod hummock_version_delta; +pub mod hummock_version_stats; +pub mod index; +pub mod object; +pub mod object_dependency; +pub mod schema; +pub mod sink; +pub mod source; +pub mod system_parameter; +pub mod table; +pub mod trx; +pub mod user; +pub mod user_privilege; +pub mod view; +pub mod worker; +pub mod worker_property; + +pub type WorkerId = u32; +pub type TransactionId = u32; + +pub type ObjectId = u32; +pub type DatabaseId = ObjectId; +pub type SchemaId = ObjectId; +pub type TableId = ObjectId; +pub type SourceId = ObjectId; +pub type SinkId = ObjectId; +pub type IndexId = ObjectId; +pub type ViewId = ObjectId; +pub type FunctionId = ObjectId; +pub type ConnectionId = ObjectId; +pub type UserId = u32; + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum JobStatus { + #[sea_orm(string_value = "CREATING")] + Creating, + #[sea_orm(string_value = "CREATED")] + Created, +} + +impl From for PbStreamJobStatus { + fn from(job_status: JobStatus) -> Self { + match job_status { + JobStatus::Creating => Self::Creating, + JobStatus::Created => Self::Created, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum CreateType { + #[sea_orm(string_value = "BACKGROUND")] + Background, + #[sea_orm(string_value = "FOREGROUND")] + Foreground, +} + +impl From for PbCreateType { + fn from(create_type: 
CreateType) -> Self { + match create_type { + CreateType::Background => Self::Background, + CreateType::Foreground => Self::Foreground, + } + } +} + +/// Defines struct with a single pb field that derives `FromJsonQueryResult`, it will helps to map json value stored in database to Pb struct. +macro_rules! derive_from_json_struct { + ($struct_name:ident, $field_type:ty) => { + #[derive(Clone, Debug, PartialEq, FromJsonQueryResult, Serialize, Deserialize, Default)] + pub struct $struct_name(pub $field_type); + impl Eq for $struct_name {} + }; +} + +derive_from_json_struct!(I32Array, Vec); +derive_from_json_struct!(DataType, risingwave_pb::data::DataType); +derive_from_json_struct!(DataTypeArray, Vec); +derive_from_json_struct!(FieldArray, Vec); +derive_from_json_struct!(Property, HashMap); +derive_from_json_struct!(ColumnCatalog, risingwave_pb::plan_common::PbColumnCatalog); +derive_from_json_struct!( + ColumnCatalogArray, + Vec +); +derive_from_json_struct!(StreamSourceInfo, risingwave_pb::catalog::PbStreamSourceInfo); +derive_from_json_struct!(WatermarkDesc, risingwave_pb::catalog::PbWatermarkDesc); +derive_from_json_struct!( + WatermarkDescArray, + Vec +); +derive_from_json_struct!(ExprNodeArray, Vec); +derive_from_json_struct!(ColumnOrderArray, Vec); +derive_from_json_struct!(SinkFormatDesc, risingwave_pb::catalog::PbSinkFormatDesc); +derive_from_json_struct!(Cardinality, risingwave_pb::plan_common::PbCardinality); +derive_from_json_struct!(TableVersion, risingwave_pb::catalog::table::PbTableVersion); +derive_from_json_struct!( + PrivateLinkService, + risingwave_pb::catalog::connection::PbPrivateLinkService +); diff --git a/src/meta/src/model_v2/object.rs b/src/meta/src/model_v2/object.rs new file mode 100644 index 0000000000000..5048f93a483d9 --- /dev/null +++ b/src/meta/src/model_v2/object.rs @@ -0,0 +1,193 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::{DatabaseId, ObjectId, SchemaId, UserId}; + +#[derive(Clone, Debug, PartialEq, Eq, Copy, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum ObjectType { + #[sea_orm(string_value = "DATABASE")] + Database, + #[sea_orm(string_value = "SCHEMA")] + Schema, + #[sea_orm(string_value = "TABLE")] + Table, + #[sea_orm(string_value = "SOURCE")] + Source, + #[sea_orm(string_value = "SINK")] + Sink, + #[sea_orm(string_value = "VIEW")] + View, + #[sea_orm(string_value = "INDEX")] + Index, + #[sea_orm(string_value = "FUNCTION")] + Function, + #[sea_orm(string_value = "CONNECTION")] + Connection, +} + +impl ObjectType { + pub fn as_str(&self) -> &'static str { + match self { + ObjectType::Database => "database", + ObjectType::Schema => "schema", + ObjectType::Table => "table", + ObjectType::Source => "source", + ObjectType::Sink => "sink", + ObjectType::View => "view", + ObjectType::Index => "index", + ObjectType::Function => "function", + ObjectType::Connection => "connection", + } + } +} + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "object")] +pub struct Model { + #[sea_orm(primary_key)] + pub oid: ObjectId, + pub obj_type: ObjectType, + pub owner_id: UserId, + pub schema_id: Option, + pub database_id: Option, + pub initialized_at: DateTime, + pub created_at: DateTime, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = 
"super::connection::Entity")] + Connection, + #[sea_orm(has_many = "super::database::Entity")] + Database, + #[sea_orm(has_many = "super::fragment::Entity")] + Fragment, + #[sea_orm(has_many = "super::function::Entity")] + Function, + #[sea_orm(has_many = "super::index::Entity")] + Index, + #[sea_orm( + belongs_to = "Entity", + from = "Column::DatabaseId", + to = "Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + SelfRef2, + #[sea_orm( + belongs_to = "Entity", + from = "Column::SchemaId", + to = "Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + SelfRef1, + #[sea_orm(has_many = "super::schema::Entity")] + Schema, + #[sea_orm(has_many = "super::sink::Entity")] + Sink, + #[sea_orm(has_many = "super::source::Entity")] + Source, + #[sea_orm(has_many = "super::table::Entity")] + Table, + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::OwnerId", + to = "super::user::Column::UserId", + on_update = "NoAction", + on_delete = "Cascade" + )] + User, + #[sea_orm(has_many = "super::user_privilege::Entity")] + UserPrivilege, + #[sea_orm(has_many = "super::view::Entity")] + View, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Connection.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Database.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Fragment.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Function.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Index.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Schema.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Sink.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Source.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Table.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { 
+ Relation::User.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::UserPrivilege.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::View.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/object_dependency.rs b/src/meta/src/model_v2/object_dependency.rs new file mode 100644 index 0000000000000..53800112a7370 --- /dev/null +++ b/src/meta/src/model_v2/object_dependency.rs @@ -0,0 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ObjectId, UserId}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "object_dependency")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub oid: ObjectId, + pub used_by: UserId, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::Oid", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object2, + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::UsedBy", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object1, +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/prelude.rs b/src/meta/src/model_v2/prelude.rs new file mode 100644 index 0000000000000..ab9670f712f04 --- /dev/null +++ b/src/meta/src/model_v2/prelude.rs @@ -0,0 +1,40 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use super::actor::Entity as Actor; +pub use super::cluster::Entity as Cluster; +pub use super::compaction_config::Entity as CompactionConfig; +pub use super::compaction_status::Entity as CompactionStatus; +pub use super::compaction_task::Entity as CompactionTask; +pub use super::connection::Entity as Connection; +pub use super::database::Entity as Database; +pub use super::fragment::Entity as Fragment; +pub use super::function::Entity as Function; +pub use super::hummock_pinned_snapshot::Entity as HummockPinnedSnapshot; +pub use super::hummock_pinned_version::Entity as HummockPinnedVersion; +pub use super::hummock_version_delta::Entity as HummockVersionDelta; +pub use super::hummock_version_stats::Entity as HummockVersionStats; +pub use super::index::Entity as Index; +pub use super::object::Entity as Object; +pub use super::object_dependency::Entity as ObjectDependency; +pub use super::schema::Entity as Schema; +pub use super::sink::Entity as Sink; +pub use super::source::Entity as Source; +pub use super::system_parameter::Entity as SystemParameter; +pub use super::table::Entity as Table; +pub use super::user::Entity as User; +pub use super::user_privilege::Entity as UserPrivilege; +pub use super::view::Entity as View; +pub use super::worker::Entity as Worker; +pub use super::worker_property::Entity as WorkerProperty; diff --git a/src/meta/src/model_v2/schema.rs b/src/meta/src/model_v2/schema.rs new file mode 100644 index 0000000000000..2c28665fd06f0 --- /dev/null +++ b/src/meta/src/model_v2/schema.rs @@ -0,0 +1,45 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::SchemaId; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "schema")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub schema_id: SchemaId, + pub name: String, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::SchemaId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/sink.rs b/src/meta/src/model_v2/sink.rs new file mode 100644 index 0000000000000..bef46f1d7195f --- /dev/null +++ b/src/meta/src/model_v2/sink.rs @@ -0,0 +1,96 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_pb::catalog::PbSinkType; +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ + ColumnCatalogArray, ColumnOrderArray, ConnectionId, I32Array, JobStatus, Property, + SinkFormatDesc, SinkId, +}; + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum SinkType { + #[sea_orm(string_value = "APPEND_ONLY")] + AppendOnly, + #[sea_orm(string_value = "FORCE_APPEND_ONLY")] + ForceAppendOnly, + #[sea_orm(string_value = "UPSERT")] + Upsert, +} + +impl From for PbSinkType { + fn from(sink_type: SinkType) -> Self { + match sink_type { + SinkType::AppendOnly => Self::AppendOnly, + SinkType::ForceAppendOnly => Self::ForceAppendOnly, + SinkType::Upsert => Self::Upsert, + } + } +} + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "sink")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub sink_id: SinkId, + pub name: String, + pub columns: ColumnCatalogArray, + pub plan_pk: ColumnOrderArray, + pub distribution_key: I32Array, + pub downstream_pk: I32Array, + pub sink_type: SinkType, + pub properties: Property, + pub definition: String, + pub connection_id: Option, + pub db_name: String, + pub sink_from_name: String, + pub sink_format_desc: Option, + pub job_status: JobStatus, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::connection::Entity", + from = "Column::ConnectionId", + to = "super::connection::Column::ConnectionId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Connection, + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::SinkId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Connection.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } 
+} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/source.rs b/src/meta/src/model_v2/source.rs new file mode 100644 index 0000000000000..2ad1de7914d96 --- /dev/null +++ b/src/meta/src/model_v2/source.rs @@ -0,0 +1,80 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ + ColumnCatalogArray, ConnectionId, I32Array, Property, SourceId, StreamSourceInfo, TableId, + WatermarkDescArray, +}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "source")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub source_id: SourceId, + pub name: String, + pub row_id_index: Option, + pub columns: ColumnCatalogArray, + pub pk_column_ids: I32Array, + pub properties: Property, + pub definition: String, + pub source_info: Option, + pub watermark_descs: WatermarkDescArray, + pub optional_associated_table_id: Option, + pub connection_id: Option, + pub version: u64, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::connection::Entity", + from = "Column::ConnectionId", + to = "super::connection::Column::ConnectionId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Connection, + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::SourceId", + to = "super::object::Column::Oid", + on_update = "NoAction", + 
on_delete = "Cascade" + )] + Object, + #[sea_orm(has_many = "super::table::Entity")] + Table, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Connection.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Table.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/system_parameter.rs b/src/meta/src/model_v2/system_parameter.rs new file mode 100644 index 0000000000000..366c3f743187b --- /dev/null +++ b/src/meta/src/model_v2/system_parameter.rs @@ -0,0 +1,30 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "system_parameter")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub name: String, + pub value: String, + pub is_mutable: bool, + pub description: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/table.rs b/src/meta/src/model_v2/table.rs new file mode 100644 index 0000000000000..08caee7009f8f --- /dev/null +++ b/src/meta/src/model_v2/table.rs @@ -0,0 +1,148 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use risingwave_pb::catalog::table::PbTableType; +use risingwave_pb::catalog::PbHandleConflictBehavior; +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ + Cardinality, ColumnCatalogArray, ColumnOrderArray, CreateType, I32Array, JobStatus, Property, + SourceId, TableId, TableVersion, +}; + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum TableType { + #[sea_orm(string_value = "TABLE")] + Table, + #[sea_orm(string_value = "MATERIALIZED_VIEW")] + MaterializedView, + #[sea_orm(string_value = "INDEX")] + Index, + #[sea_orm(string_value = "INTERNAL")] + Internal, +} + +impl From for PbTableType { + fn from(table_type: TableType) -> Self { + match table_type { + TableType::Table => Self::Table, + TableType::MaterializedView => Self::MaterializedView, + TableType::Index => Self::Index, + TableType::Internal => Self::Internal, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum HandleConflictBehavior { + #[sea_orm(string_value = "OVERWRITE")] + Overwrite, + #[sea_orm(string_value = "IGNORE")] + Ignore, + #[sea_orm(string_value = "NO_CHECK")] + NoCheck, +} + +impl From for PbHandleConflictBehavior { + fn from(handle_conflict_behavior: HandleConflictBehavior) -> Self { + match handle_conflict_behavior { + HandleConflictBehavior::Overwrite => Self::Overwrite, + HandleConflictBehavior::Ignore => Self::Ignore, + HandleConflictBehavior::NoCheck => Self::NoCheck, + } + } +} + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "table")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub table_id: TableId, + pub name: String, + pub optional_associated_source_id: Option, + pub table_type: TableType, + pub columns: ColumnCatalogArray, + pub pk: ColumnOrderArray, + pub distribution_key: I32Array, + pub stream_key: I32Array, + pub 
append_only: bool, + pub properties: Property, + pub fragment_id: i32, + pub vnode_col_index: Option, + pub row_id_index: Option, + pub value_indices: I32Array, + pub definition: String, + pub handle_pk_conflict_behavior: HandleConflictBehavior, + pub read_prefix_len_hint: u32, + pub watermark_indices: I32Array, + pub dist_key_in_pk: I32Array, + pub dml_fragment_id: Option, + pub cardinality: Option, + pub cleaned_by_watermark: bool, + pub job_status: JobStatus, + pub create_type: CreateType, + pub version: TableVersion, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::fragment::Entity", + from = "Column::DmlFragmentId", + to = "super::fragment::Column::FragmentId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Fragment2, + #[sea_orm( + belongs_to = "super::fragment::Entity", + from = "Column::FragmentId", + to = "super::fragment::Column::FragmentId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Fragment1, + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::TableId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, + #[sea_orm( + belongs_to = "super::source::Entity", + from = "Column::OptionalAssociatedSourceId", + to = "super::source::Column::SourceId", + on_update = "NoAction", + on_delete = "NoAction" + )] + Source, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Source.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/trx.rs b/src/meta/src/model_v2/trx.rs new file mode 100644 index 0000000000000..4bfe6d0261de4 --- /dev/null +++ b/src/meta/src/model_v2/trx.rs @@ -0,0 +1,276 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub type Transaction = sea_orm::DatabaseTransaction; + +#[cfg(not(madsim))] +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use risingwave_pb::hummock::HummockPinnedVersion; + use sea_orm::{EntityTrait, TransactionTrait}; + + use crate::controller::SqlMetaStore; + use crate::model::{BTreeMapTransaction, ValTransaction, VarTransaction}; + use crate::model_v2::hummock_pinned_version::Model as HummockPinnedVersionModel; + use crate::model_v2::prelude::HummockPinnedVersion as HummockPinnedVersionEntity; + use crate::model_v2::trx::Transaction; + + #[tokio::test] + async fn test_simple_var_transaction_commit() { + let store = SqlMetaStore::for_test().await; + let db = &store.conn; + let mut kv = HummockPinnedVersion { + context_id: 1, + min_pinned_id: 2, + }; + let mut num_txn = VarTransaction::<'_, Transaction, _>::new(&mut kv); + num_txn.min_pinned_id = 3; + assert_eq!(num_txn.min_pinned_id, 3); + let mut txn = db.begin().await.unwrap(); + num_txn.apply_to_txn(&mut txn).await.unwrap(); + txn.commit().await.unwrap(); + let db_val = HummockPinnedVersionEntity::find_by_id(1) + .one(db) + .await + .unwrap() + .unwrap(); + assert_eq!(db_val.min_pinned_id, 3); + num_txn.commit(); + assert_eq!(kv.min_pinned_id, 3); + } + + #[test] + fn test_simple_var_transaction_abort() { + let mut kv = HummockPinnedVersion { + context_id: 1, + min_pinned_id: 11, + }; + let mut num_txn = VarTransaction::<'_, Transaction, _>::new(&mut kv); + num_txn.min_pinned_id = 2; + num_txn.abort(); + assert_eq!(11, kv.min_pinned_id); + } + + #[tokio::test] + 
async fn test_tree_map_transaction_commit() { + let mut map: BTreeMap = BTreeMap::new(); + // to remove + map.insert( + 1, + HummockPinnedVersion { + context_id: 1, + min_pinned_id: 11, + }, + ); + // to-remove-after-modify + map.insert( + 2, + HummockPinnedVersion { + context_id: 2, + min_pinned_id: 22, + }, + ); + // first + map.insert( + 3, + HummockPinnedVersion { + context_id: 3, + min_pinned_id: 33, + }, + ); + + let mut map_copy = map.clone(); + let mut map_txn = BTreeMapTransaction::new(&mut map); + map_txn.remove(1); + map_txn.insert( + 2, + HummockPinnedVersion { + context_id: 2, + min_pinned_id: 0, + }, + ); + map_txn.remove(2); + // first + map_txn.insert( + 3, + HummockPinnedVersion { + context_id: 3, + min_pinned_id: 333, + }, + ); + // second + map_txn.insert( + 4, + HummockPinnedVersion { + context_id: 4, + min_pinned_id: 44, + }, + ); + assert_eq!( + &HummockPinnedVersion { + context_id: 4, + min_pinned_id: 44 + }, + map_txn.get(&4).unwrap() + ); + // third + map_txn.insert( + 5, + HummockPinnedVersion { + context_id: 5, + min_pinned_id: 55, + }, + ); + assert_eq!( + &HummockPinnedVersion { + context_id: 5, + min_pinned_id: 55 + }, + map_txn.get(&5).unwrap() + ); + + let mut third_entry = map_txn.get_mut(5).unwrap(); + third_entry.min_pinned_id = 555; + assert_eq!( + &HummockPinnedVersion { + context_id: 5, + min_pinned_id: 555 + }, + map_txn.get(&5).unwrap() + ); + + let store = SqlMetaStore::for_test().await; + let db = &store.conn; + let mut txn = db.begin().await.unwrap(); + map_txn.apply_to_txn(&mut txn).await.unwrap(); + txn.commit().await.unwrap(); + + let db_rows: Vec = + HummockPinnedVersionEntity::find().all(db).await.unwrap(); + assert_eq!(db_rows.len(), 3); + assert_eq!( + 1, + db_rows + .iter() + .filter(|m| m.context_id == 3 && m.min_pinned_id == 333) + .count() + ); + assert_eq!( + 1, + db_rows + .iter() + .filter(|m| m.context_id == 4 && m.min_pinned_id == 44) + .count() + ); + assert_eq!( + 1, + db_rows + .iter() + .filter(|m| 
m.context_id == 5 && m.min_pinned_id == 555) + .count() + ); + map_txn.commit(); + + // replay the change to local copy and compare + map_copy.remove(&1).unwrap(); + map_copy.insert( + 2, + HummockPinnedVersion { + context_id: 2, + min_pinned_id: 22, + }, + ); + map_copy.remove(&2).unwrap(); + map_copy.insert( + 3, + HummockPinnedVersion { + context_id: 3, + min_pinned_id: 333, + }, + ); + map_copy.insert( + 4, + HummockPinnedVersion { + context_id: 4, + min_pinned_id: 44, + }, + ); + map_copy.insert( + 5, + HummockPinnedVersion { + context_id: 5, + min_pinned_id: 555, + }, + ); + assert_eq!(map_copy, map); + } + + #[tokio::test] + async fn test_tree_map_entry_update_transaction_commit() { + let mut map: BTreeMap = BTreeMap::new(); + map.insert( + 1, + HummockPinnedVersion { + context_id: 1, + min_pinned_id: 11, + }, + ); + + let mut map_txn = BTreeMapTransaction::new(&mut map); + let mut first_entry_txn = map_txn.new_entry_txn(1).unwrap(); + first_entry_txn.min_pinned_id = 111; + + let store = SqlMetaStore::for_test().await; + let db = &store.conn; + let mut txn = db.begin().await.unwrap(); + first_entry_txn.apply_to_txn(&mut txn).await.unwrap(); + txn.commit().await.unwrap(); + first_entry_txn.commit(); + + let db_rows: Vec = + HummockPinnedVersionEntity::find().all(db).await.unwrap(); + assert_eq!(db_rows.len(), 1); + assert_eq!( + 1, + db_rows + .iter() + .filter(|m| m.context_id == 1 && m.min_pinned_id == 111) + .count() + ); + assert_eq!(111, map.get(&1).unwrap().min_pinned_id); + } + + #[tokio::test] + async fn test_tree_map_entry_insert_transaction_commit() { + let mut map: BTreeMap = BTreeMap::new(); + + let mut map_txn = BTreeMapTransaction::new(&mut map); + let first_entry_txn = map_txn.new_entry_insert_txn( + 1, + HummockPinnedVersion { + context_id: 1, + min_pinned_id: 11, + }, + ); + let store = SqlMetaStore::for_test().await; + let db = &store.conn; + let mut txn = db.begin().await.unwrap(); + first_entry_txn.apply_to_txn(&mut txn).await.unwrap(); + 
txn.commit().await.unwrap(); + first_entry_txn.commit(); + assert_eq!(11, map.get(&1).unwrap().min_pinned_id); + } +} diff --git a/src/meta/src/model_v2/user.rs b/src/meta/src/model_v2/user.rs new file mode 100644 index 0000000000000..0e7ab4dd17876 --- /dev/null +++ b/src/meta/src/model_v2/user.rs @@ -0,0 +1,45 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::UserId; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "user")] +pub struct Model { + #[sea_orm(primary_key)] + pub user_id: UserId, + pub name: String, + pub is_super: bool, + pub can_create_db: bool, + pub can_create_user: bool, + pub can_login: bool, + pub auth_type: Option, + pub auth_value: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::object::Entity")] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/user_privilege.rs b/src/meta/src/model_v2/user_privilege.rs new file mode 100644 index 0000000000000..335f716cec1c8 --- /dev/null +++ b/src/meta/src/model_v2/user_privilege.rs @@ -0,0 +1,65 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::{ObjectId, UserId}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "user_privilege")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub user_id: UserId, + pub oid: ObjectId, + pub granted_by: UserId, + pub actions: String, + pub with_grant_option: bool, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::Oid", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "NoAction" + )] + Object, + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::GrantedBy", + to = "super::user::Column::UserId", + on_update = "NoAction", + on_delete = "NoAction" + )] + User2, + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::UserId", + to = "super::user::Column::UserId", + on_update = "NoAction", + on_delete = "Cascade" + )] + User1, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/view.rs b/src/meta/src/model_v2/view.rs new file mode 100644 index 0000000000000..8f7d22408d3f2 --- /dev/null +++ b/src/meta/src/model_v2/view.rs @@ -0,0 +1,62 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_pb::catalog::PbView; +use sea_orm::entity::prelude::*; +use sea_orm::ActiveValue; + +use crate::model_v2::{FieldArray, Property, ViewId}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "view")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub view_id: ViewId, + pub name: String, + pub properties: Property, + pub definition: String, + pub columns: FieldArray, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::object::Entity", + from = "Column::ViewId", + to = "super::object::Column::Oid", + on_update = "NoAction", + on_delete = "Cascade" + )] + Object, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Object.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} + +impl From for ActiveModel { + fn from(view: PbView) -> Self { + Self { + view_id: ActiveValue::Set(view.id as _), + name: ActiveValue::Set(view.name), + properties: ActiveValue::Set(Property(view.properties)), + definition: ActiveValue::Set(view.sql), + columns: ActiveValue::Set(FieldArray(view.columns)), + } + } +} diff --git a/src/meta/src/model_v2/worker.rs b/src/meta/src/model_v2/worker.rs new file mode 100644 index 0000000000000..08cdb2be34da1 --- /dev/null +++ b/src/meta/src/model_v2/worker.rs @@ -0,0 +1,67 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::{TransactionId, WorkerId}; + +#[derive(Clone, Debug, Hash, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum WorkerType { + #[sea_orm(string_value = "FRONTEND")] + Frontend, + #[sea_orm(string_value = "COMPUTE_NODE")] + ComputeNode, + #[sea_orm(string_value = "RISE_CTL")] + RiseCtl, + #[sea_orm(string_value = "COMPACTOR")] + Compactor, + #[sea_orm(string_value = "META")] + Meta, +} + +#[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "String", db_type = "String(None)")] +pub enum WorkerStatus { + #[sea_orm(string_value = "STARTING")] + Starting, + #[sea_orm(string_value = "RUNNING")] + Running, +} + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "worker")] +pub struct Model { + #[sea_orm(primary_key)] + pub worker_id: WorkerId, + pub worker_type: WorkerType, + pub host: String, + pub port: i32, + pub status: WorkerStatus, + pub transaction_id: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::worker_property::Entity")] + WorkerProperty, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::WorkerProperty.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/model_v2/worker_property.rs b/src/meta/src/model_v2/worker_property.rs new file mode 100644 index 0000000000000..8521cbed15ce2 --- /dev/null +++ 
b/src/meta/src/model_v2/worker_property.rs @@ -0,0 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sea_orm::entity::prelude::*; + +use crate::model_v2::{I32Array, WorkerId}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "worker_property")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub worker_id: WorkerId, + pub parallel_unit_ids: I32Array, + pub is_streaming: bool, + pub is_serving: bool, + pub is_unschedulable: bool, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::worker::Entity", + from = "Column::WorkerId", + to = "super::worker::Column::WorkerId", + on_update = "NoAction", + on_delete = "Cascade" + )] + Worker, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Worker.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/src/meta/src/rpc/ddl_controller.rs b/src/meta/src/rpc/ddl_controller.rs index a11fe815609cd..36615bd93b757 100644 --- a/src/meta/src/rpc/ddl_controller.rs +++ b/src/meta/src/rpc/ddl_controller.rs @@ -15,6 +15,7 @@ use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; +use std::time::Duration; use itertools::Itertools; use risingwave_common::config::DefaultParallelism; @@ -23,12 +24,13 @@ use risingwave_common::util::column_index_mapping::ColIndexMapping; use 
risingwave_common::util::epoch::Epoch; use risingwave_pb::catalog::connection::private_link_service::PbPrivateLinkProvider; use risingwave_pb::catalog::{ - connection, Connection, Database, Function, Schema, Source, Table, View, + connection, Connection, CreateType, Database, Function, Schema, Source, Table, View, }; use risingwave_pb::ddl_service::alter_relation_name_request::Relation; use risingwave_pb::ddl_service::DdlProgress; use risingwave_pb::stream_plan::StreamFragmentGraph as StreamFragmentGraphProto; use tokio::sync::Semaphore; +use tokio::time::sleep; use tracing::log::warn; use tracing::Instrument; @@ -37,6 +39,7 @@ use crate::manager::{ CatalogManagerRef, ClusterManagerRef, ConnectionId, DatabaseId, FragmentManagerRef, FunctionId, IdCategory, IndexId, LocalNotification, MetaSrvEnv, NotificationVersion, RelationIdEnum, SchemaId, SinkId, SourceId, StreamingClusterInfo, StreamingJob, TableId, ViewId, + IGNORED_NOTIFICATION_VERSION, }; use crate::model::{StreamEnvironment, TableFragments}; use crate::rpc::cloud_provider::AwsEc2Client; @@ -47,6 +50,7 @@ use crate::stream::{ }; use crate::{MetaError, MetaResult}; +#[derive(PartialEq)] pub enum DropMode { Restrict, Cascade, @@ -92,7 +96,7 @@ pub enum DdlCommand { DropFunction(FunctionId), CreateView(View), DropView(ViewId, DropMode), - CreateStreamingJob(StreamingJob, StreamFragmentGraphProto), + CreateStreamingJob(StreamingJob, StreamFragmentGraphProto, CreateType), DropStreamingJob(StreamingJobId, DropMode), ReplaceTable(StreamingJob, StreamFragmentGraphProto, ColIndexMapping), AlterRelationName(Relation, String), @@ -177,7 +181,7 @@ impl CreatingStreamingJobPermit { } impl DdlController { - pub(crate) async fn new( + pub async fn new( env: MetaSrvEnv, catalog_manager: CatalogManagerRef, stream_manager: GlobalStreamManagerRef, @@ -216,7 +220,7 @@ impl DdlController { /// has been interrupted during executing, the request will be cancelled by tonic. 
Since we have /// a lot of logic for revert, status management, notification and so on, ensuring consistency /// would be a huge hassle and pain if we don't spawn here. - pub(crate) async fn run_command(&self, command: DdlCommand) -> MetaResult { + pub async fn run_command(&self, command: DdlCommand) -> MetaResult { self.check_barrier_manager_status().await?; let ctrl = self.clone(); let fut = async move { @@ -235,8 +239,9 @@ impl DdlController { DdlCommand::DropView(view_id, drop_mode) => { ctrl.drop_view(view_id, drop_mode).await } - DdlCommand::CreateStreamingJob(stream_job, fragment_graph) => { - ctrl.create_streaming_job(stream_job, fragment_graph).await + DdlCommand::CreateStreamingJob(stream_job, fragment_graph, create_type) => { + ctrl.create_streaming_job(stream_job, fragment_graph, create_type) + .await } DdlCommand::DropStreamingJob(job_id, drop_mode) => { ctrl.drop_streaming_job(job_id, drop_mode).await @@ -261,7 +266,7 @@ impl DdlController { tokio::spawn(fut).await.unwrap() } - pub(crate) async fn get_ddl_progress(&self) -> Vec { + pub async fn get_ddl_progress(&self) -> Vec { self.barrier_manager.get_ddl_progress().await } @@ -321,20 +326,25 @@ impl DdlController { drop_mode: DropMode, ) -> MetaResult { // 1. Drop source in catalog. - let version = self + let (version, streaming_job_ids) = self .catalog_manager .drop_relation( RelationIdEnum::Source(source_id), self.fragment_manager.clone(), drop_mode, ) - .await? - .0; + .await?; + // 2. Unregister source connector worker. self.source_manager .unregister_sources(vec![source_id]) .await; + // 3. 
Drop streaming jobs if cascade + self.stream_manager + .drop_streaming_jobs(streaming_job_ids) + .await; + Ok(version) } @@ -404,7 +414,13 @@ impl DdlController { &self, mut stream_job: StreamingJob, fragment_graph: StreamFragmentGraphProto, + create_type: CreateType, ) -> MetaResult { + tracing::debug!( + id = stream_job.id(), + definition = stream_job.definition(), + "starting stream job", + ); let _permit = self .creating_streaming_job_permits .semaphore @@ -414,6 +430,8 @@ impl DdlController { let _reschedule_job_lock = self.stream_manager.reschedule_lock.read().await; let env = StreamEnvironment::from_protobuf(fragment_graph.get_env().unwrap()); + + tracing::debug!(id = stream_job.id(), "preparing stream job"); let fragment_graph = self .prepare_stream_job(&mut stream_job, fragment_graph) .await?; @@ -423,38 +441,100 @@ impl DdlController { let mut internal_tables = vec![]; let result = try { + tracing::debug!(id = stream_job.id(), "building stream job"); let (ctx, table_fragments) = self .build_stream_job(env, &stream_job, fragment_graph) .await?; internal_tables = ctx.internal_tables(); - match &stream_job { - StreamingJob::Table(Some(source), _) => { + match stream_job { + StreamingJob::Table(Some(ref source), _) => { // Register the source on the connector node. self.source_manager.register_source(source).await?; } - StreamingJob::Sink(sink) => { + StreamingJob::Sink(ref sink) => { // Validate the sink on the connector node. 
- validate_sink(sink, self.env.connector_client()).await?; + validate_sink(sink).await?; } _ => {} } + (ctx, table_fragments) + }; - self.stream_manager - .create_streaming_job(table_fragments, ctx) - .await?; + let (ctx, table_fragments) = match result { + Ok(r) => r, + Err(e) => { + self.cancel_stream_job(&stream_job, internal_tables).await?; + return Err(e); + } }; - match result { - Ok(_) => self.finish_stream_job(stream_job, internal_tables).await, - Err(err) => { - self.cancel_stream_job(&stream_job, internal_tables).await; - Err(err) + match create_type { + CreateType::Foreground | CreateType::Unspecified => { + self.create_streaming_job_inner(stream_job, table_fragments, ctx, internal_tables) + .await + } + CreateType::Background => { + let ctrl = self.clone(); + let stream_job_id = stream_job.id(); + let fut = async move { + let result = ctrl + .create_streaming_job_inner( + stream_job, + table_fragments, + ctx, + internal_tables, + ) + .await; + match result { + Err(e) => { + tracing::error!(id=stream_job_id, error = ?e, "finish stream job failed") + } + Ok(_) => { + tracing::info!(id = stream_job_id, "finish stream job succeeded") + } + } + }; + tokio::spawn(fut); + Ok(IGNORED_NOTIFICATION_VERSION) } } } + // We persist table fragments at this step. + async fn create_streaming_job_inner( + &self, + stream_job: StreamingJob, + table_fragments: TableFragments, + ctx: CreateStreamingJobContext, + internal_tables: Vec
, + ) -> MetaResult { + let job_id = stream_job.id(); + tracing::debug!(id = job_id, "creating stream job"); + let result = self + .stream_manager + .create_streaming_job(table_fragments, ctx) + .await; + if let Err(e) = result { + match stream_job.create_type() { + // NOTE: This assumes that we will trigger recovery, + // and recover stream job progress. + CreateType::Background => { + tracing::error!(id = stream_job.id(), error = ?e, "finish stream job failed") + } + _ => { + self.cancel_stream_job(&stream_job, internal_tables).await?; + } + } + return Err(e); + }; + tracing::debug!(id = job_id, "finishing stream job"); + let version = self.finish_stream_job(stream_job, internal_tables).await?; + tracing::debug!(id = job_id, "finished stream job"); + Ok(version) + } + async fn drop_streaming_job( &self, job_id: StreamingJobId, @@ -517,6 +597,8 @@ impl DdlController { StreamFragmentGraph::new(fragment_graph, self.env.id_gen_manager_ref(), stream_job) .await?; + let internal_tables = fragment_graph.internal_tables().into_values().collect_vec(); + // 2. Set the graph-related fields and freeze the `stream_job`. stream_job.set_table_fragment_id(fragment_graph.table_fragment_id()); stream_job.set_dml_fragment_id(fragment_graph.dml_fragment_id()); @@ -524,7 +606,7 @@ impl DdlController { // 3. Mark current relation as "creating" and add reference count to dependent relations. self.catalog_manager - .start_create_stream_job_procedure(stream_job) + .start_create_stream_job_procedure(stream_job, internal_tables) .await?; Ok(fragment_graph) @@ -633,6 +715,7 @@ impl DdlController { table_properties: stream_job.properties(), definition: stream_job.definition(), mv_table_id: stream_job.mv_table(), + create_type: stream_job.create_type(), }; // 4. Mark creating tables, including internal tables and the table of the stream job. @@ -649,17 +732,27 @@ impl DdlController { Ok((ctx, table_fragments)) } - /// `cancel_stream_job` cancels a stream job and clean some states. 
- async fn cancel_stream_job(&self, stream_job: &StreamingJob, internal_tables: Vec
) { + /// This is NOT used by `CANCEL JOBS`. + /// It is used internally by `DdlController` to cancel and cleanup stream job. + async fn cancel_stream_job( + &self, + stream_job: &StreamingJob, + internal_tables: Vec
, + ) -> MetaResult<()> { let mut creating_internal_table_ids = internal_tables.into_iter().map(|t| t.id).collect_vec(); // 1. cancel create procedure. match stream_job { StreamingJob::MaterializedView(table) => { - creating_internal_table_ids.push(table.id); - self.catalog_manager - .cancel_create_table_procedure(table) + // barrier manager will do the cleanup. + let result = self + .catalog_manager + .cancel_create_table_procedure(table.id, creating_internal_table_ids.clone()) .await; + creating_internal_table_ids.push(table.id); + if let Err(e) = result { + tracing::warn!("Failed to cancel create table procedure, perhaps barrier manager has already cleaned it. Reason: {e:#?}"); + } } StreamingJob::Sink(sink) => { self.catalog_manager @@ -667,16 +760,23 @@ impl DdlController { .await; } StreamingJob::Table(source, table) => { - creating_internal_table_ids.push(table.id); if let Some(source) = source { self.catalog_manager .cancel_create_table_procedure_with_source(source, table) .await; } else { - self.catalog_manager - .cancel_create_table_procedure(table) + let result = self + .catalog_manager + .cancel_create_table_procedure( + table.id, + creating_internal_table_ids.clone(), + ) .await; + if let Err(e) = result { + tracing::warn!("Failed to cancel create table procedure, perhaps barrier manager has already cleaned it. Reason: {e:#?}"); + } } + creating_internal_table_ids.push(table.id); } StreamingJob::Index(index, table) => { creating_internal_table_ids.push(table.id); @@ -689,6 +789,7 @@ impl DdlController { self.catalog_manager .unmark_creating_tables(&creating_internal_table_ids, true) .await; + Ok(()) } /// `finish_stream_job` finishes a stream job and clean some states. @@ -836,7 +937,7 @@ impl DdlController { // 3. Mark current relation as "updating". 
self.catalog_manager - .start_replace_table_procedure(stream_job.table().unwrap()) + .start_replace_table_procedure(stream_job) .await?; Ok(fragment_graph) @@ -947,22 +1048,18 @@ impl DdlController { stream_job: &StreamingJob, table_col_index_mapping: ColIndexMapping, ) -> MetaResult { - let StreamingJob::Table(None, table) = stream_job else { + let StreamingJob::Table(source, table) = stream_job else { unreachable!("unexpected job: {stream_job:?}") }; self.catalog_manager - .finish_replace_table_procedure(table, table_col_index_mapping) + .finish_replace_table_procedure(source, table, table_col_index_mapping) .await } async fn cancel_replace_table(&self, stream_job: &StreamingJob) -> MetaResult<()> { - let StreamingJob::Table(None, table) = stream_job else { - unreachable!("unexpected job: {stream_job:?}") - }; - self.catalog_manager - .cancel_replace_table_procedure(table) + .cancel_replace_table_procedure(stream_job) .await } @@ -999,4 +1096,18 @@ impl DdlController { } } } + + pub async fn wait(&self) { + for _ in 0..30 * 60 { + if self + .catalog_manager + .list_creating_background_mvs() + .await + .is_empty() + { + break; + } + sleep(Duration::from_secs(1)).await; + } + } } diff --git a/src/meta/src/rpc/election_client.rs b/src/meta/src/rpc/election/etcd.rs similarity index 96% rename from src/meta/src/rpc/election_client.rs rename to src/meta/src/rpc/election/etcd.rs index fdc9f9b3bc2dd..f30d8253cb95d 100644 --- a/src/meta/src/rpc/election_client.rs +++ b/src/meta/src/rpc/election/etcd.rs @@ -18,33 +18,15 @@ use std::time::Duration; use etcd_client::{ConnectOptions, Error, GetOptions, LeaderKey, ResignOptions}; use risingwave_common::bail; -use serde::Serialize; use tokio::sync::watch::Receiver; use tokio::sync::{oneshot, watch}; use tokio::time; use tokio_stream::StreamExt; +use crate::rpc::election::{ElectionClient, ElectionMember, META_ELECTION_KEY}; use crate::storage::WrappedEtcdClient; use crate::MetaResult; -const META_ELECTION_KEY: &str = 
"__meta_election_"; - -#[derive(Debug, Serialize)] -pub struct ElectionMember { - pub id: String, - pub is_leader: bool, -} - -#[async_trait::async_trait] -pub trait ElectionClient: Send + Sync + 'static { - fn id(&self) -> MetaResult; - async fn run_once(&self, ttl: i64, stop: Receiver<()>) -> MetaResult<()>; - fn subscribe(&self) -> Receiver; - async fn leader(&self) -> MetaResult>; - async fn get_members(&self) -> MetaResult>; - async fn is_leader(&self) -> bool; -} - pub struct EtcdElectionClient { id: String, is_leader_sender: watch::Sender, @@ -367,7 +349,8 @@ mod tests { use tokio::sync::watch::Sender; use tokio::time; - use crate::rpc::election_client::{ElectionClient, EtcdElectionClient, META_ELECTION_KEY}; + use crate::rpc::election::etcd::EtcdElectionClient; + use crate::rpc::election::{ElectionClient, META_ELECTION_KEY}; type ElectionHandle = (Sender<()>, Arc); diff --git a/src/meta/src/rpc/election/mod.rs b/src/meta/src/rpc/election/mod.rs new file mode 100644 index 0000000000000..7916ddba6eea4 --- /dev/null +++ b/src/meta/src/rpc/election/mod.rs @@ -0,0 +1,42 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+pub mod etcd; +pub mod sql; + +use serde::Serialize; +use tokio::sync::watch::Receiver; + +use crate::MetaResult; + +const META_ELECTION_KEY: &str = "__meta_election_"; + +#[derive(Debug, Serialize)] +pub struct ElectionMember { + pub id: String, + pub is_leader: bool, +} + +#[async_trait::async_trait] +pub trait ElectionClient: Send + Sync + 'static { + async fn init(&self) -> MetaResult<()> { + Ok(()) + } + + fn id(&self) -> MetaResult; + async fn run_once(&self, ttl: i64, stop: Receiver<()>) -> MetaResult<()>; + fn subscribe(&self) -> Receiver; + async fn leader(&self) -> MetaResult>; + async fn get_members(&self) -> MetaResult>; + async fn is_leader(&self) -> bool; +} diff --git a/src/meta/src/rpc/election/sql.rs b/src/meta/src/rpc/election/sql.rs new file mode 100644 index 0000000000000..a027e8bffdfd1 --- /dev/null +++ b/src/meta/src/rpc/election/sql.rs @@ -0,0 +1,815 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::anyhow; +use sea_orm::{ + ConnectionTrait, DatabaseBackend, DatabaseConnection, FromQueryResult, Statement, + TransactionTrait, Value, +}; +use tokio::sync::watch; +use tokio::sync::watch::Receiver; +use tokio::time; + +use crate::rpc::election::META_ELECTION_KEY; +use crate::{ElectionClient, ElectionMember, MetaResult}; + +pub struct SqlBackendElectionClient { + id: String, + driver: Arc, + is_leader_sender: watch::Sender, +} + +impl SqlBackendElectionClient { + pub fn new(id: String, driver: Arc) -> Self { + let (sender, _) = watch::channel(false); + Self { + id, + driver, + is_leader_sender: sender, + } + } +} + +#[derive(Debug, FromQueryResult)] +pub struct ElectionRow { + service: String, + id: String, +} + +#[async_trait::async_trait] +pub trait SqlDriver: Send + Sync + 'static { + async fn init_database(&self) -> MetaResult<()>; + + async fn update_heartbeat(&self, service_name: &str, id: &str) -> MetaResult<()>; + + async fn try_campaign(&self, service_name: &str, id: &str, ttl: i64) + -> MetaResult; + async fn leader(&self, service_name: &str) -> MetaResult>; + + async fn candidates(&self, service_name: &str) -> MetaResult>; + + async fn resign(&self, service_name: &str, id: &str) -> MetaResult<()>; +} + +pub trait SqlDriverCommon { + const ELECTION_LEADER_TABLE_NAME: &'static str = "election_leader"; + const ELECTION_MEMBER_TABLE_NAME: &'static str = "election_member"; + + fn election_table_name() -> &'static str { + Self::ELECTION_LEADER_TABLE_NAME + } + fn member_table_name() -> &'static str { + Self::ELECTION_MEMBER_TABLE_NAME + } +} + +impl SqlDriverCommon for MySqlDriver {} + +impl SqlDriverCommon for PostgresDriver {} + +impl SqlDriverCommon for SqliteDriver {} + +pub struct MySqlDriver { + pub(crate) conn: DatabaseConnection, +} + +impl MySqlDriver { + pub fn new(conn: DatabaseConnection) -> Arc { + Arc::new(Self { conn }) + } +} + +pub struct PostgresDriver { + pub(crate) conn: 
DatabaseConnection, +} + +impl PostgresDriver { + pub fn new(conn: DatabaseConnection) -> Arc { + Arc::new(Self { conn }) + } +} + +pub struct SqliteDriver { + pub(crate) conn: DatabaseConnection, +} + +impl SqliteDriver { + pub fn new(conn: DatabaseConnection) -> Arc { + Arc::new(Self { conn }) + } +} + +#[async_trait::async_trait] +impl SqlDriver for SqliteDriver { + async fn init_database(&self) -> MetaResult<()> { + self.conn.execute( + Statement::from_string(DatabaseBackend::Sqlite, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR(256), id VARCHAR(256), last_heartbeat DATETIME, PRIMARY KEY (service, id));"#, + table = Self::member_table_name() + ))).await?; + + self.conn.execute( + Statement::from_string(DatabaseBackend::Sqlite, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR(256), id VARCHAR(256), last_heartbeat DATETIME, PRIMARY KEY (service));"#, + table = Self::election_table_name() + ))).await?; + + Ok(()) + } + + async fn update_heartbeat(&self, service_name: &str, id: &str) -> MetaResult<()> { + self.conn + .execute(Statement::from_sql_and_values( + DatabaseBackend::Sqlite, + format!( + r#"INSERT INTO {table} (id, service, last_heartbeat) +VALUES($1, $2, CURRENT_TIMESTAMP) +ON CONFLICT (id, service) +DO + UPDATE SET last_heartbeat = EXCLUDED.last_heartbeat; +"#, + table = Self::member_table_name() + ), + vec![Value::from(id), Value::from(service_name)], + )) + .await?; + Ok(()) + } + + async fn try_campaign( + &self, + service_name: &str, + id: &str, + ttl: i64, + ) -> MetaResult { + let query_result = self.conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::Sqlite, + format!( + r#"INSERT INTO {table} (service, id, last_heartbeat) + VALUES ($1, $2, CURRENT_TIMESTAMP) + ON CONFLICT (service) + DO UPDATE + SET id = CASE + WHEN DATETIME({table}.last_heartbeat, '+' || $3 || ' second') < CURRENT_TIMESTAMP THEN EXCLUDED.id + ELSE {table}.id + END, + last_heartbeat = CASE + WHEN 
DATETIME({table}.last_heartbeat, '+' || $3 || ' seconds') < CURRENT_TIMESTAMP THEN EXCLUDED.last_heartbeat + WHEN {table}.id = EXCLUDED.id THEN EXCLUDED.last_heartbeat + ELSE {table}.last_heartbeat + END + RETURNING service, id, last_heartbeat; + "#, + table = Self::election_table_name() + ), + vec![Value::from(service_name), Value::from(id), Value::from(ttl)], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + let row = row.ok_or_else(|| anyhow!("bad result from sqlite"))?; + + Ok(row) + } + + async fn leader(&self, service_name: &str) -> MetaResult> { + let query_result = self + .conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::Sqlite, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = $1;"#, + table = Self::election_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + Ok(row) + } + + async fn candidates(&self, service_name: &str) -> MetaResult> { + let all = self + .conn + .query_all(Statement::from_sql_and_values( + DatabaseBackend::Sqlite, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = $1;"#, + table = Self::member_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let rows = all + .into_iter() + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .collect::>()?; + + Ok(rows) + } + + async fn resign(&self, service_name: &str, id: &str) -> MetaResult<()> { + let txn = self.conn.begin().await?; + + txn.execute(Statement::from_sql_and_values( + DatabaseBackend::Sqlite, + format!( + r#" + DELETE FROM {table} WHERE service = $1 AND id = $2; + "#, + table = Self::election_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + )) + .await?; + + txn.execute(Statement::from_sql_and_values( + 
DatabaseBackend::Sqlite, + format!( + r#" + DELETE FROM {table} WHERE service = $1 AND id = $2; + "#, + table = Self::member_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + )) + .await?; + + txn.commit().await?; + + Ok(()) + } +} + +#[async_trait::async_trait] +impl SqlDriver for MySqlDriver { + async fn init_database(&self) -> MetaResult<()> { + self.conn.execute( + Statement::from_string(DatabaseBackend::MySql, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR(256), id VARCHAR(256), last_heartbeat DATETIME, PRIMARY KEY (service, id));"#, + table = Self::member_table_name() + ))).await?; + + self.conn.execute( + Statement::from_string(DatabaseBackend::MySql, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR(256), id VARCHAR(256), last_heartbeat DATETIME, PRIMARY KEY (service));"#, + table = Self::election_table_name() + ))).await?; + + Ok(()) + } + + async fn update_heartbeat(&self, service_name: &str, id: &str) -> MetaResult<()> { + self.conn + .execute(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#"INSERT INTO {table} (id, service, last_heartbeat) + VALUES(?, ?, NOW()) + ON duplicate KEY + UPDATE last_heartbeat = VALUES(last_heartbeat); + "#, + table = Self::member_table_name() + ), + vec![Value::from(id), Value::from(service_name)], + )) + .await?; + + Ok(()) + } + + async fn try_campaign( + &self, + service_name: &str, + id: &str, + ttl: i64, + ) -> MetaResult { + self.conn + .execute(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#"INSERT + IGNORE + INTO {table} (service, id, last_heartbeat) + VALUES (?, ?, NOW()) + ON duplicate KEY + UPDATE id = if(last_heartbeat < NOW() - INTERVAL ? 
SECOND, + VALUES(id), id), + last_heartbeat = if(id = + VALUES(id), + VALUES(last_heartbeat), last_heartbeat);"#, + table = Self::election_table_name() + ), + vec![Value::from(service_name), Value::from(id), Value::from(ttl)], + )) + .await?; + + let query_result = self + .conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = ?;"#, + table = Self::election_table_name(), + ), + vec![Value::from(service_name)], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + let row = row.ok_or_else(|| anyhow!("bad result from mysql"))?; + + Ok(row) + } + + async fn leader(&self, service_name: &str) -> MetaResult> { + let query_result = self + .conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = ?;"#, + table = Self::election_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + Ok(row) + } + + async fn candidates(&self, service_name: &str) -> MetaResult> { + let all = self + .conn + .query_all(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = ?;"#, + table = Self::member_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let rows = all + .into_iter() + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .collect::>()?; + + Ok(rows) + } + + async fn resign(&self, service_name: &str, id: &str) -> MetaResult<()> { + let txn = self.conn.begin().await?; + + txn.execute(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#" + DELETE FROM {table} WHERE service = ? 
AND id = ?; + "#, + table = Self::election_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + )) + .await?; + + txn.execute(Statement::from_sql_and_values( + DatabaseBackend::MySql, + format!( + r#" + DELETE FROM {table} WHERE service = ? AND id = ?; + "#, + table = Self::member_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + )) + .await?; + + txn.commit().await?; + + Ok(()) + } +} + +#[async_trait::async_trait] +impl SqlDriver for PostgresDriver { + async fn init_database(&self) -> MetaResult<()> { + self.conn.execute( + Statement::from_string(DatabaseBackend::Postgres, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR, id VARCHAR, last_heartbeat TIMESTAMPTZ, PRIMARY KEY (service, id));"#, + table = Self::member_table_name() + ))).await?; + + self.conn.execute( + Statement::from_string(DatabaseBackend::Postgres, format!( + r#"CREATE TABLE IF NOT EXISTS {table} (service VARCHAR, id VARCHAR, last_heartbeat TIMESTAMPTZ, PRIMARY KEY (service));"#, + table = Self::election_table_name() + ))).await?; + + Ok(()) + } + + async fn update_heartbeat(&self, service_name: &str, id: &str) -> MetaResult<()> { + self.conn + .execute(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#"INSERT INTO {table} (id, service, last_heartbeat) + VALUES($1, $2, NOW()) + ON CONFLICT (id, service) + DO + UPDATE SET last_heartbeat = EXCLUDED.last_heartbeat; + "#, + table = Self::member_table_name() + ), + vec![Value::from(id), Value::from(service_name)], + )) + .await?; + + Ok(()) + } + + async fn try_campaign( + &self, + service_name: &str, + id: &str, + ttl: i64, + ) -> MetaResult { + let query_result = self + .conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#"INSERT INTO {table} (service, id, last_heartbeat) + VALUES ($1, $2, NOW()) + ON CONFLICT (service) + DO UPDATE + SET id = CASE + WHEN {table}.last_heartbeat < NOW() - $3::INTERVAL THEN EXCLUDED.id + ELSE 
{table}.id + END, + last_heartbeat = CASE + WHEN {table}.last_heartbeat < NOW() - $3::INTERVAL THEN EXCLUDED.last_heartbeat + WHEN {table}.id = EXCLUDED.id THEN EXCLUDED.last_heartbeat + ELSE {table}.last_heartbeat + END + RETURNING service, id, last_heartbeat; + "#, + table = Self::election_table_name() + ), + vec![ + Value::from(service_name), + Value::from(id), + // special handling for interval + Value::from(ttl.to_string()), + ], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + let row = row.ok_or_else(|| anyhow!("bad result from postgres"))?; + + Ok(row) + } + + async fn leader(&self, service_name: &str) -> MetaResult> { + let query_result = self + .conn + .query_one(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = $1;"#, + table = Self::election_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let row = query_result + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .transpose()?; + + Ok(row) + } + + async fn candidates(&self, service_name: &str) -> MetaResult> { + let all = self + .conn + .query_all(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#"SELECT service, id, last_heartbeat FROM {table} WHERE service = $1;"#, + table = Self::member_table_name() + ), + vec![Value::from(service_name)], + )) + .await?; + + let rows = all + .into_iter() + .map(|query_result| ElectionRow::from_query_result(&query_result, "")) + .collect::>()?; + + Ok(rows) + } + + async fn resign(&self, service_name: &str, id: &str) -> MetaResult<()> { + let txn = self.conn.begin().await?; + + txn.execute(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#" + DELETE FROM {table} WHERE service = $1 AND id = $2; + "#, + table = Self::election_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + 
)) + .await?; + + txn.execute(Statement::from_sql_and_values( + DatabaseBackend::Postgres, + format!( + r#" + DELETE FROM {table} WHERE service = $1 AND id = $2; + "#, + table = Self::member_table_name() + ), + vec![Value::from(service_name), Value::from(id)], + )) + .await?; + + txn.commit().await?; + + Ok(()) + } +} + +#[async_trait::async_trait] +impl ElectionClient for SqlBackendElectionClient +where + T: SqlDriver + Send + Sync + 'static, +{ + async fn init(&self) -> MetaResult<()> { + tracing::info!("initializing database for Sql backend election client"); + self.driver.init_database().await + } + + fn id(&self) -> MetaResult { + Ok(self.id.clone()) + } + + async fn run_once(&self, ttl: i64, stop: Receiver<()>) -> MetaResult<()> { + let stop = stop.clone(); + + let member_refresh_driver = self.driver.clone(); + + let id = self.id.clone(); + + let mut member_refresh_stop = stop.clone(); + + let handle = tokio::spawn(async move { + let mut ticker = tokio::time::interval(Duration::from_secs(1)); + + loop { + tokio::select! { + _ = ticker.tick() => { + + if let Err(e) = member_refresh_driver + .update_heartbeat(META_ELECTION_KEY, id.as_str()) + .await { + + tracing::debug!("keep alive for member {} failed {}", id, e); + continue + } + } + _ = member_refresh_stop.changed() => { + return; + } + } + } + }); + + let _guard = scopeguard::guard(handle, |handle| handle.abort()); + + self.is_leader_sender.send_replace(false); + + let mut timeout_ticker = time::interval(Duration::from_secs_f64(ttl as f64 / 2.0)); + timeout_ticker.reset(); + let mut stop = stop.clone(); + + let mut is_leader = false; + + let mut election_ticker = time::interval(Duration::from_secs(1)); + + loop { + tokio::select! 
{ + _ = election_ticker.tick() => { + let election_row = self + .driver + .try_campaign(META_ELECTION_KEY, self.id.as_str(), ttl) + .await?; + + assert_eq!(election_row.service, META_ELECTION_KEY); + + if election_row.id.eq_ignore_ascii_case(self.id.as_str()) { + if !is_leader{ + self.is_leader_sender.send_replace(true); + is_leader = true; + } + } else if is_leader { + tracing::warn!("leader has been changed to {}", election_row.id); + break; + } + + timeout_ticker.reset(); + } + _ = timeout_ticker.tick() => { + tracing::error!("member {} election timeout", self.id); + break; + } + _ = stop.changed() => { + tracing::info!("stop signal received when observing"); + + if is_leader { + tracing::info!("leader {} resigning", self.id); + if let Err(e) = self.driver.resign(META_ELECTION_KEY, self.id.as_str()).await { + tracing::warn!("resign failed {}", e); + } + } + + return Ok(()); + } + } + } + self.is_leader_sender.send_replace(false); + + return Ok(()); + } + + fn subscribe(&self) -> Receiver { + self.is_leader_sender.subscribe() + } + + async fn leader(&self) -> MetaResult> { + let row = self.driver.leader(META_ELECTION_KEY).await?; + Ok(row.map(|row| ElectionMember { + id: row.id, + is_leader: true, + })) + } + + async fn get_members(&self) -> MetaResult> { + let leader = self.leader().await?; + let members = self.driver.candidates(META_ELECTION_KEY).await?; + + Ok(members + .into_iter() + .map(|row| { + let is_leader = leader + .as_ref() + .map(|leader| leader.id.eq_ignore_ascii_case(row.id.as_str())) + .unwrap_or(false); + + ElectionMember { + id: row.id, + is_leader, + } + }) + .collect()) + } + + async fn is_leader(&self) -> bool { + *self.is_leader_sender.borrow() + } +} + +#[cfg(not(madsim))] +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use sea_orm::{ConnectionTrait, Database, DatabaseConnection, DbBackend, Statement}; + use tokio::sync::watch; + + use crate::rpc::election::sql::{SqlBackendElectionClient, SqlDriverCommon, SqliteDriver}; + use 
crate::{ElectionClient, MetaResult}; + + async fn prepare_sqlite_env() -> MetaResult { + let db: DatabaseConnection = Database::connect("sqlite::memory:").await?; + + db.execute(Statement::from_sql_and_values( + DbBackend::Sqlite, + format!("CREATE TABLE {table} (service VARCHAR(256) PRIMARY KEY, id VARCHAR(256), last_heartbeat DATETIME)", + table = SqliteDriver::election_table_name()), + vec![], + )) + .await?; + + db.execute(Statement::from_sql_and_values( + DbBackend::Sqlite, + format!("CREATE TABLE {table} (service VARCHAR(256), id VARCHAR(256), last_heartbeat DATETIME, PRIMARY KEY (service, id))", + table = SqliteDriver::member_table_name()), + vec![], + )) + .await?; + + Ok(db) + } + + #[tokio::test] + async fn test_sql_election() { + let id = "test_id".to_string(); + let conn = prepare_sqlite_env().await.unwrap(); + + let provider = SqliteDriver { conn }; + let (sender, _) = watch::channel(false); + let sql_election_client: Arc = Arc::new(SqlBackendElectionClient { + id, + driver: Arc::new(provider), + is_leader_sender: sender, + }); + let (stop_sender, _) = watch::channel(()); + + let stop_receiver = stop_sender.subscribe(); + + let mut receiver = sql_election_client.subscribe(); + let client_ = sql_election_client.clone(); + tokio::spawn(async move { client_.run_once(10, stop_receiver).await.unwrap() }); + + loop { + receiver.changed().await.unwrap(); + if *receiver.borrow() { + assert!(sql_election_client.is_leader().await); + break; + } + } + } + + #[tokio::test] + async fn test_sql_election_multi() { + let (stop_sender, _) = watch::channel(()); + + let mut clients = vec![]; + + let conn = prepare_sqlite_env().await.unwrap(); + for i in 1..3 { + let id = format!("test_id_{}", i); + let provider = SqliteDriver { conn: conn.clone() }; + let (sender, _) = watch::channel(false); + let sql_election_client: Arc = Arc::new(SqlBackendElectionClient { + id, + driver: Arc::new(provider), + is_leader_sender: sender, + }); + + let stop_receiver = 
stop_sender.subscribe(); + let client_ = sql_election_client.clone(); + tokio::spawn(async move { client_.run_once(10, stop_receiver).await.unwrap() }); + clients.push(sql_election_client); + } + + let mut is_leaders = vec![]; + + for client in clients { + is_leaders.push(client.is_leader().await); + } + + assert!(is_leaders.iter().filter(|&x| *x).count() <= 1); + } +} diff --git a/src/meta/src/rpc/metrics.rs b/src/meta/src/rpc/metrics.rs index c19c06d9ab2cd..3183007753cbd 100644 --- a/src/meta/src/rpc/metrics.rs +++ b/src/meta/src/rpc/metrics.rs @@ -37,7 +37,7 @@ use tokio::task::JoinHandle; use crate::hummock::HummockManagerRef; use crate::manager::{CatalogManagerRef, ClusterManagerRef, FragmentManagerRef}; -use crate::rpc::server::ElectionClientRef; +use crate::rpc::ElectionClientRef; #[derive(Clone)] pub struct MetaMetrics { @@ -536,7 +536,7 @@ impl MetaMetrics { let sink_info = register_int_gauge_vec_with_registry!( "sink_info", "Mapping from actor id to (actor id, sink name)", - &["actor_id", "sink_name",], + &["actor_id", "sink_id", "sink_name",], registry ) .unwrap(); @@ -552,7 +552,7 @@ impl MetaMetrics { let opts = histogram_opts!( "storage_compact_task_size", "Total size of compact that have been issued to state store", - exponential_buckets(4096.0, 1.6, 28).unwrap() + exponential_buckets(1048576.0, 2.0, 16).unwrap() ); let compact_task_size = @@ -690,7 +690,7 @@ impl Default for MetaMetrics { } } -pub async fn start_worker_info_monitor( +pub fn start_worker_info_monitor( cluster_manager: ClusterManagerRef, election_client: Option, interval: Duration, @@ -738,7 +738,7 @@ pub async fn start_worker_info_monitor( (join_handle, shutdown_tx) } -pub async fn start_fragment_info_monitor( +pub fn start_fragment_info_monitor( cluster_manager: ClusterManagerRef, catalog_manager: CatalogManagerRef, fragment_manager: FragmentManagerRef, @@ -810,13 +810,14 @@ pub async fn start_fragment_info_monitor( if let Some(stream_node) = &actor.nodes { if let 
Some(Sink(sink_node)) = &stream_node.node_body { - let sink_name = match &sink_node.sink_desc { - Some(sink_desc) => &sink_desc.name, - _ => "unknown", + let (sink_id, sink_name) = match &sink_node.sink_desc { + Some(sink_desc) => (sink_desc.id, sink_desc.name.as_str()), + _ => (0, "unknown"), // unreachable }; + let sink_id_str = sink_id.to_string(); meta_metrics .sink_info - .with_label_values(&[&actor_id_str, sink_name]) + .with_label_values(&[&actor_id_str, &sink_id_str, sink_name]) .set(1); } } diff --git a/src/meta/src/rpc/mod.rs b/src/meta/src/rpc/mod.rs index 6baa080fba83b..99f1b51eaafce 100644 --- a/src/meta/src/rpc/mod.rs +++ b/src/meta/src/rpc/mod.rs @@ -12,18 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod cloud_provider; +pub mod cloud_provider; pub mod ddl_controller; -mod election_client; -mod intercept; +pub mod election; +pub mod intercept; pub mod metrics; -pub mod server; -pub mod service; -pub use election_client::{ElectionClient, ElectionMember, EtcdElectionClient}; -pub use service::cluster_service::ClusterServiceImpl; -pub use service::ddl_service::DdlServiceImpl; -pub use service::heartbeat_service::HeartbeatServiceImpl; -pub use service::hummock_service::HummockServiceImpl; -pub use service::notification_service::NotificationServiceImpl; -pub use service::stream_service::StreamServiceImpl; +pub type ElectionClientRef = std::sync::Arc; + +pub use election::etcd::EtcdElectionClient; +pub use election::{ElectionClient, ElectionMember}; diff --git a/src/meta/src/serving/mod.rs b/src/meta/src/serving/mod.rs index f6d1a5b1aa714..521a8b9ad1c0d 100644 --- a/src/meta/src/serving/mod.rs +++ b/src/meta/src/serving/mod.rs @@ -103,7 +103,7 @@ fn to_deleted_fragment_parallel_unit_mapping( .collect() } -pub(crate) async fn on_meta_start( +pub async fn on_meta_start( notification_manager: NotificationManagerRef, cluster_manager: ClusterManagerRef, fragment_manager: FragmentManagerRef, 
@@ -126,7 +126,7 @@ pub(crate) async fn on_meta_start( ); } -pub(crate) async fn start_serving_vnode_mapping_worker( +pub async fn start_serving_vnode_mapping_worker( notification_manager: NotificationManagerRef, cluster_manager: ClusterManagerRef, fragment_manager: FragmentManagerRef, diff --git a/src/meta/src/stream/scale.rs b/src/meta/src/stream/scale.rs index a125d61d91703..afe6186165e22 100644 --- a/src/meta/src/stream/scale.rs +++ b/src/meta/src/stream/scale.rs @@ -97,7 +97,7 @@ pub struct RescheduleOptions { pub resolve_no_shuffle_upstream: bool, } -pub(crate) struct RescheduleContext { +pub struct RescheduleContext { /// Index used to map `ParallelUnitId` to `WorkerId` parallel_unit_id_to_worker_id: BTreeMap, /// Meta information for all Actors @@ -171,7 +171,7 @@ impl RescheduleContext { /// assert to fail and should be skipped from the upper level. /// /// The return value is the bitmap distribution after scaling, which covers all virtual node indexes -pub(crate) fn rebalance_actor_vnode( +pub fn rebalance_actor_vnode( actors: &[StreamActor], actors_to_remove: &BTreeSet, actors_to_create: &BTreeSet, diff --git a/src/meta/src/stream/sink.rs b/src/meta/src/stream/sink.rs index 3f46b781bd6b7..8544011071ec2 100644 --- a/src/meta/src/stream/sink.rs +++ b/src/meta/src/stream/sink.rs @@ -16,18 +16,14 @@ use risingwave_connector::dispatch_sink; use risingwave_connector::sink::catalog::SinkCatalog; use risingwave_connector::sink::{build_sink, Sink, SinkParam}; use risingwave_pb::catalog::PbSink; -use risingwave_rpc_client::ConnectorClient; use crate::MetaResult; -pub async fn validate_sink( - prost_sink_catalog: &PbSink, - connector_client: Option, -) -> MetaResult<()> { +pub async fn validate_sink(prost_sink_catalog: &PbSink) -> MetaResult<()> { let sink_catalog = SinkCatalog::from(prost_sink_catalog); let param = SinkParam::from(sink_catalog); let sink = build_sink(param)?; - dispatch_sink!(sink, sink, { Ok(sink.validate(connector_client).await?) 
}) + dispatch_sink!(sink, sink, Ok(sink.validate().await?)) } diff --git a/src/meta/src/stream/source_manager.rs b/src/meta/src/stream/source_manager.rs index a6b25d5fba4d7..1cd666e5d7160 100644 --- a/src/meta/src/stream/source_manager.rs +++ b/src/meta/src/stream/source_manager.rs @@ -16,18 +16,19 @@ use std::borrow::BorrowMut; use std::cmp::Ordering; use std::collections::hash_map::Entry; use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet}; +use std::ops::Deref; use std::sync::Arc; use std::time::Duration; use anyhow::anyhow; use itertools::Itertools; use risingwave_common::catalog::TableId; +use risingwave_connector::dispatch_source_prop; use risingwave_connector::source::{ - ConnectorProperties, SourceEnumeratorContext, SourceEnumeratorInfo, SplitEnumeratorImpl, - SplitId, SplitImpl, SplitMetaData, + ConnectorProperties, SourceEnumeratorContext, SourceEnumeratorInfo, SourceProperties, + SplitEnumerator, SplitId, SplitImpl, SplitMetaData, }; use risingwave_pb::catalog::Source; -use risingwave_pb::connector_service::PbTableSchema; use risingwave_pb::source::{ConnectorSplit, ConnectorSplits}; use risingwave_rpc_client::ConnectorClient; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; @@ -46,7 +47,7 @@ pub type SourceManagerRef = Arc; pub type SplitAssignment = HashMap>>; pub struct SourceManager { - pub(crate) paused: Mutex<()>, + pub paused: Mutex<()>, env: MetaSrvEnv, barrier_scheduler: BarrierScheduler, core: Mutex, @@ -61,23 +62,29 @@ struct SharedSplitMap { type SharedSplitMapRef = Arc>; -struct ConnectorSourceWorker { +struct ConnectorSourceWorker { source_id: SourceId, source_name: String, current_splits: SharedSplitMapRef, - enumerator: SplitEnumeratorImpl, + enumerator: P::SplitEnumerator, period: Duration, metrics: Arc, - connector_properties: ConnectorProperties, + connector_properties: P, connector_client: Option, fail_cnt: u32, } -impl ConnectorSourceWorker { - const DEFAULT_SOURCE_WORKER_TICK_INTERVAL: Duration = 
Duration::from_secs(30); +fn extract_prop_from_source(source: &Source) -> MetaResult { + let mut properties = ConnectorProperties::extract(source.properties.clone())?; + properties.init_from_pb_source(source); + Ok(properties) +} + +const DEFAULT_SOURCE_WORKER_TICK_INTERVAL: Duration = Duration::from_secs(30); +impl ConnectorSourceWorker

{ async fn refresh(&mut self) -> MetaResult<()> { - let enumerator = SplitEnumeratorImpl::create( + let enumerator = P::SplitEnumerator::new( self.connector_properties.clone(), Arc::new(SourceEnumeratorContext { metrics: self.metrics.source_enumerator_metrics.clone(), @@ -97,17 +104,13 @@ impl ConnectorSourceWorker { pub async fn create( connector_client: &Option, source: &Source, + connector_properties: P, period: Duration, splits: Arc>, metrics: Arc, ) -> MetaResult { - let mut properties = ConnectorProperties::extract(source.properties.clone())?; - if properties.is_cdc_connector() { - let table_schema = Self::extract_source_schema(source); - properties.init_cdc_properties(table_schema); - } - let enumerator = SplitEnumeratorImpl::create( - properties.clone(), + let enumerator = P::SplitEnumerator::new( + connector_properties.clone(), Arc::new(SourceEnumeratorContext { metrics: metrics.source_enumerator_metrics.clone(), info: SourceEnumeratorInfo { @@ -125,7 +128,7 @@ impl ConnectorSourceWorker { enumerator, period, metrics, - connector_properties: properties, + connector_properties, connector_client: connector_client.clone(), fail_cnt: 0, }) @@ -177,42 +180,19 @@ impl ConnectorSourceWorker { current_splits.splits.replace( splits .into_iter() - .map(|split| (split.id(), split)) + .map(|split| (split.id(), P::Split::into(split))) .collect(), ); Ok(()) } - - fn extract_source_schema(source: &Source) -> PbTableSchema { - let pk_indices = source - .pk_column_ids - .iter() - .map(|&id| { - source - .columns - .iter() - .position(|col| col.column_desc.as_ref().unwrap().column_id == id) - .unwrap() as u32 - }) - .collect_vec(); - - PbTableSchema { - columns: source - .columns - .iter() - .flat_map(|col| &col.column_desc) - .cloned() - .collect(), - pk_indices, - } - } } struct ConnectorSourceWorkerHandle { handle: JoinHandle<()>, sync_call_tx: UnboundedSender>>, splits: SharedSplitMapRef, + enable_scale_in: bool, } impl ConnectorSourceWorkerHandle { @@ -304,7 +284,9 @@ 
impl SourceManagerCore { *fragment_id, prev_actor_splits, &discovered_splits, - SplitDiffOptions::default(), + SplitDiffOptions { + enable_scale_in: handle.enable_scale_in, + }, ) { split_assignment.insert(*fragment_id, change); } @@ -526,7 +508,7 @@ impl SourceManager { source, &mut managed_sources, metrics.clone(), - ) + )? } } @@ -624,6 +606,7 @@ impl SourceManager { fragment_id, empty_actor_splits, &prev_splits, + // pre-allocate splits is the first time getting splits and it does not have scale in scene SplitDiffOptions::default(), ) .unwrap_or_default(); @@ -712,7 +695,7 @@ impl SourceManager { source: Source, managed_sources: &mut HashMap, metrics: Arc, - ) { + ) -> MetaResult<()> { tracing::info!("spawning new watcher for source {}", source.id); let (sync_call_tx, sync_call_rx) = tokio::sync::mpsc::unbounded_channel(); @@ -721,32 +704,37 @@ impl SourceManager { let current_splits_ref = splits.clone(); let source_id = source.id; + let connector_properties = extract_prop_from_source(&source)?; + let enable_scale_in = connector_properties.enable_split_scale_in(); let handle = tokio::spawn(async move { let mut ticker = time::interval(Self::DEFAULT_SOURCE_TICK_INTERVAL); ticker.set_missed_tick_behavior(MissedTickBehavior::Skip); - let mut worker = loop { - ticker.tick().await; - - match ConnectorSourceWorker::create( - &connector_client, - &source, - ConnectorSourceWorker::DEFAULT_SOURCE_WORKER_TICK_INTERVAL, - splits.clone(), - metrics.clone(), - ) - .await - { - Ok(worker) => { - break worker; - } - Err(e) => { - tracing::warn!("failed to create source worker: {}", e); + dispatch_source_prop!(connector_properties, prop, { + let mut worker = loop { + ticker.tick().await; + + match ConnectorSourceWorker::create( + &connector_client, + &source, + prop.deref().clone(), + DEFAULT_SOURCE_WORKER_TICK_INTERVAL, + splits.clone(), + metrics.clone(), + ) + .await + { + Ok(worker) => { + break worker; + } + Err(e) => { + tracing::warn!("failed to create source worker: 
{}", e); + } } - } - }; + }; - worker.run(sync_call_rx).await + worker.run(sync_call_rx).await + }); }); managed_sources.insert( @@ -755,8 +743,10 @@ impl SourceManager { handle, sync_call_tx, splits: current_splits_ref, + enable_scale_in, }, ); + Ok(()) } async fn create_source_worker( @@ -767,38 +757,42 @@ impl SourceManager { metrics: Arc, ) -> MetaResult<()> { let current_splits_ref = Arc::new(Mutex::new(SharedSplitMap { splits: None })); - let mut worker = ConnectorSourceWorker::create( - &connector_client, - source, - ConnectorSourceWorker::DEFAULT_SOURCE_WORKER_TICK_INTERVAL, - current_splits_ref.clone(), - metrics, - ) - .await?; - - tracing::info!("spawning new watcher for source {}", source.id); - - // don't force tick in process of recovery. One source down should not lead to meta recovery - // failure. - if force_tick { - // if fail to fetch meta info, will refuse to create source - - // todo: make the timeout configurable, longer than `properties.sync.call.timeout` in - // kafka - tokio::time::timeout(Self::DEFAULT_SOURCE_TICK_TIMEOUT, worker.tick()) - .await - .map_err(|_e| { - anyhow!( - "failed to fetch meta info for source {}, error: timeout {}", - source.id, - Self::DEFAULT_SOURCE_TICK_TIMEOUT.as_secs() - ) - })??; - } - + let connector_properties = extract_prop_from_source(source)?; + let enable_scale_in = connector_properties.enable_split_scale_in(); let (sync_call_tx, sync_call_rx) = tokio::sync::mpsc::unbounded_channel(); + let handle = dispatch_source_prop!(connector_properties, prop, { + let mut worker = ConnectorSourceWorker::create( + &connector_client, + source, + *prop, + DEFAULT_SOURCE_WORKER_TICK_INTERVAL, + current_splits_ref.clone(), + metrics, + ) + .await?; - let handle = tokio::spawn(async move { worker.run(sync_call_rx).await }); + tracing::info!("spawning new watcher for source {}", source.id); + + // don't force tick in process of recovery. One source down should not lead to meta + // recovery failure. 
+ if force_tick { + // if fail to fetch meta info, will refuse to create source + + // todo: make the timeout configurable, longer than `properties.sync.call.timeout` + // in kafka + tokio::time::timeout(Self::DEFAULT_SOURCE_TICK_TIMEOUT, worker.tick()) + .await + .map_err(|_e| { + anyhow!( + "failed to fetch meta info for source {}, error: timeout {}", + source.id, + Self::DEFAULT_SOURCE_TICK_TIMEOUT.as_secs() + ) + })??; + } + + tokio::spawn(async move { worker.run(sync_call_rx).await }) + }); managed_sources.insert( source.id, @@ -806,6 +800,7 @@ impl SourceManager { handle, sync_call_tx, splits: current_splits_ref, + enable_scale_in, }, ); @@ -924,6 +919,10 @@ mod tests { fn restore_from_json(value: JsonbVal) -> anyhow::Result { serde_json::from_value(value.take()).map_err(|e| anyhow!(e)) } + + fn update_with_offset(&mut self, _start_offset: String) -> anyhow::Result<()> { + Ok(()) + } } fn check_all_splits( diff --git a/src/meta/src/stream/stream_manager.rs b/src/meta/src/stream/stream_manager.rs index 558149787c85f..77a784c64ac09 100644 --- a/src/meta/src/stream/stream_manager.rs +++ b/src/meta/src/stream/stream_manager.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use futures::future::{join_all, try_join_all, BoxFuture}; use itertools::Itertools; use risingwave_common::catalog::TableId; -use risingwave_pb::catalog::Table; +use risingwave_pb::catalog::{CreateType, Table}; use risingwave_pb::stream_plan::update_mutation::MergeUpdate; use risingwave_pb::stream_plan::Dispatcher; use risingwave_pb::stream_service::{ @@ -67,6 +67,8 @@ pub struct CreateStreamingJobContext { pub definition: String, pub mv_table_id: Option, + + pub create_type: CreateType, } impl CreateStreamingJobContext { @@ -112,22 +114,32 @@ impl CreatingStreamingJobInfo { jobs.remove(&job_id); } - async fn cancel_jobs(&self, job_ids: Vec) -> HashMap> { + async fn cancel_jobs( + &self, + job_ids: Vec, + ) -> (HashMap>, Vec) { let mut jobs = self.streaming_jobs.lock().await; let mut receivers = 
HashMap::new(); + let mut recovered_job_ids = vec![]; for job_id in job_ids { if let Some(job) = jobs.get_mut(&job_id) && let Some(shutdown_tx) = job.shutdown_tx.take() { let (tx, rx) = oneshot::channel(); - if shutdown_tx.send(CreatingState::Canceling{finish_tx: tx}).await.is_ok() { + if shutdown_tx.send(CreatingState::Canceling { finish_tx: tx }).await.is_ok() { receivers.insert(job_id, rx); } else { tracing::warn!("failed to send canceling state"); } + } else { + // If these job ids do not exist in streaming_jobs, + // we can infer they either: + // 1. are entirely non-existent, + // 2. OR they are recovered streaming jobs, and managed by BarrierManager. + recovered_job_ids.push(job_id); } } - receivers + (receivers, recovered_job_ids) } } @@ -159,26 +171,26 @@ pub struct ReplaceTableContext { /// `GlobalStreamManager` manages all the streams in the system. pub struct GlobalStreamManager { - pub(crate) env: MetaSrvEnv, + pub env: MetaSrvEnv, /// Manages definition and status of fragments and actors pub(super) fragment_manager: FragmentManagerRef, /// Broadcasts and collect barriers - pub(crate) barrier_scheduler: BarrierScheduler, + pub barrier_scheduler: BarrierScheduler, /// Maintains information of the cluster - pub(crate) cluster_manager: ClusterManagerRef, + pub cluster_manager: ClusterManagerRef, /// Maintains streaming sources from external system like kafka - pub(crate) source_manager: SourceManagerRef, + pub source_manager: SourceManagerRef, /// Creating streaming job info. creating_job_info: CreatingStreamingJobInfoRef, hummock_manager: HummockManagerRef, - pub(crate) reschedule_lock: RwLock<()>, + pub reschedule_lock: RwLock<()>, } impl GlobalStreamManager { @@ -407,7 +419,7 @@ impl GlobalStreamManager { definition, mv_table_id, internal_tables, - .. + create_type, }: CreateStreamingJobContext, ) -> MetaResult<()> { // Register to compaction group beforehand. 
@@ -424,8 +436,10 @@ impl GlobalStreamManager { table_fragments.internal_table_ids().len() + mv_table_id.map_or(0, |_| 1) ); revert_funcs.push(Box::pin(async move { - if let Err(e) = hummock_manager_ref.unregister_table_ids(®istered_table_ids).await { - tracing::warn!("Failed to unregister compaction group for {:#?}. They will be cleaned up on node restart. {:#?}", registered_table_ids, e); + if create_type == CreateType::Foreground { + if let Err(e) = hummock_manager_ref.unregister_table_ids(®istered_table_ids).await { + tracing::warn!("Failed to unregister compaction group for {:#?}. They will be cleaned up on node restart. {:#?}", registered_table_ids, e); + } } })); @@ -452,9 +466,11 @@ impl GlobalStreamManager { }) .await { - self.fragment_manager - .drop_table_fragments_vec(&HashSet::from_iter(std::iter::once(table_id))) - .await?; + if create_type == CreateType::Foreground { + self.fragment_manager + .drop_table_fragments_vec(&HashSet::from_iter(std::iter::once(table_id))) + .await?; + } return Err(err); } @@ -483,6 +499,11 @@ impl GlobalStreamManager { let dummy_table_id = table_fragments.table_id(); + let init_split_assignment = self + .source_manager + .pre_allocate_splits(&dummy_table_id) + .await?; + if let Err(err) = self .barrier_scheduler .run_config_change_command_with_pause(Command::ReplaceTable { @@ -490,6 +511,7 @@ impl GlobalStreamManager { new_table_fragments: table_fragments, merge_updates, dispatchers, + init_split_assignment, }) .await { @@ -547,13 +569,18 @@ impl GlobalStreamManager { } /// Cancel streaming jobs and return the canceled table ids. + /// 1. Send cancel message to stream jobs (via `cancel_jobs`). + /// 2. Send cancel message to recovered stream jobs (via `barrier_scheduler`). + /// + /// Cleanup of their state will be cleaned up after the `CancelStreamJob` command succeeds, + /// by the barrier manager for both of them. 
pub async fn cancel_streaming_jobs(&self, table_ids: Vec) -> Vec { if table_ids.is_empty() { return vec![]; } let _reschedule_job_lock = self.reschedule_lock.read().await; - let receivers = self.creating_job_info.cancel_jobs(table_ids).await; + let (receivers, recovered_job_ids) = self.creating_job_info.cancel_jobs(table_ids).await; let futures = receivers.into_iter().map(|(id, receiver)| async move { if receiver.await.is_ok() { @@ -564,7 +591,35 @@ impl GlobalStreamManager { None } }); - join_all(futures).await.into_iter().flatten().collect_vec() + let mut cancelled_ids = join_all(futures).await.into_iter().flatten().collect_vec(); + + // NOTE(kwannoel): For recovered stream jobs, we can directly cancel them by running the barrier command, + // since Barrier manager manages the recovered stream jobs. + let futures = recovered_job_ids.into_iter().map(|id| async move { + let result: MetaResult<()> = try { + let fragment = self + .fragment_manager + .select_table_fragments_by_table_id(&id) + .await?; + self.barrier_scheduler + .run_command(Command::CancelStreamingJob(fragment)) + .await?; + }; + match result { + Ok(_) => { + tracing::info!("cancelled recovered streaming job {id}"); + Some(id) + }, + Err(_) => { + tracing::error!("failed to cancel recovered streaming job {id}, does {id} correspond to any jobs in `SHOW JOBS`?"); + None + }, + } + }); + let cancelled_recovered_ids = join_all(futures).await.into_iter().flatten().collect_vec(); + + cancelled_ids.extend(cancelled_recovered_ids); + cancelled_ids } } @@ -800,7 +855,7 @@ mod tests { .await?, ); - let (sink_manager, _) = SinkCoordinatorManager::start_worker(None); + let (sink_manager, _) = SinkCoordinatorManager::start_worker(); let barrier_manager = Arc::new(GlobalBarrierManager::new( scheduled_barriers, @@ -890,7 +945,7 @@ mod tests { }; self.catalog_manager - .start_create_table_procedure(&table) + .start_create_table_procedure(&table, vec![]) .await?; self.global_stream_manager 
.create_streaming_job(table_fragments, ctx) diff --git a/src/meta/src/stream/test_fragmenter.rs b/src/meta/src/stream/test_fragmenter.rs index add6811272b04..68cb8125e67d0 100644 --- a/src/meta/src/stream/test_fragmenter.rs +++ b/src/meta/src/stream/test_fragmenter.rs @@ -271,7 +271,7 @@ fn make_stream_fragments() -> Vec { distribution_key: Default::default(), is_append_only: false, agg_call_states: vec![make_agg_call_result_state(), make_agg_call_result_state()], - result_table: Some(make_empty_table(1)), + intermediate_state_table: Some(make_empty_table(1)), ..Default::default() })), input: vec![filter_node], @@ -314,7 +314,7 @@ fn make_stream_fragments() -> Vec { distribution_key: Default::default(), is_append_only: false, agg_call_states: vec![make_agg_call_result_state(), make_agg_call_result_state()], - result_table: Some(make_empty_table(2)), + intermediate_state_table: Some(make_empty_table(2)), ..Default::default() })), fields: vec![], // TODO: fill this later diff --git a/src/meta/src/telemetry.rs b/src/meta/src/telemetry.rs index 774b3cdda8146..fbbc89c2ff0ec 100644 --- a/src/meta/src/telemetry.rs +++ b/src/meta/src/telemetry.rs @@ -35,7 +35,7 @@ struct NodeCount { } #[derive(Debug, Serialize, Deserialize)] -pub(crate) struct MetaTelemetryReport { +pub struct MetaTelemetryReport { #[serde(flatten)] base: TelemetryReportBase, node_count: NodeCount, @@ -45,12 +45,12 @@ pub(crate) struct MetaTelemetryReport { impl TelemetryReport for MetaTelemetryReport {} -pub(crate) struct MetaTelemetryInfoFetcher { +pub struct MetaTelemetryInfoFetcher { tracking_id: ClusterId, } impl MetaTelemetryInfoFetcher { - pub(crate) fn new(tracking_id: ClusterId) -> Self { + pub fn new(tracking_id: ClusterId) -> Self { Self { tracking_id } } } @@ -63,13 +63,13 @@ impl TelemetryInfoFetcher for MetaTelemetryInfoFetcher { } #[derive(Clone)] -pub(crate) struct MetaReportCreator { +pub struct MetaReportCreator { cluster_mgr: Arc, meta_backend: MetaBackend, } impl MetaReportCreator { - 
pub(crate) fn new(cluster_mgr: Arc, meta_backend: MetaBackend) -> Self { + pub fn new(cluster_mgr: Arc, meta_backend: MetaBackend) -> Self { Self { cluster_mgr, meta_backend, @@ -79,6 +79,7 @@ impl MetaReportCreator { #[async_trait::async_trait] impl TelemetryReportCreator for MetaReportCreator { + #[expect(refining_impl_trait)] async fn create_report( &self, tracking_id: String, diff --git a/src/object_store/src/object/mem.rs b/src/object_store/src/object/mem.rs index 8be7b75632e32..02ee6c744d33a 100644 --- a/src/object_store/src/object/mem.rs +++ b/src/object_store/src/object/mem.rs @@ -21,16 +21,16 @@ use std::time::{SystemTime, UNIX_EPOCH}; use bytes::{BufMut, Bytes, BytesMut}; use fail::fail_point; -use futures::future::try_join_all; use futures::Stream; use itertools::Itertools; +use risingwave_common::range::RangeBoundsExt; use thiserror::Error; use tokio::io::AsyncRead; use tokio::sync::Mutex; use super::{ - BlockLocation, BoxedStreamingUploader, ObjectError, ObjectMetadata, ObjectResult, ObjectStore, - StreamingUploader, + BoxedStreamingUploader, ObjectError, ObjectMetadata, ObjectRangeBounds, ObjectResult, + ObjectStore, StreamingUploader, }; use crate::object::ObjectMetadataIter; @@ -130,23 +130,11 @@ impl ObjectStore for InMemObjectStore { })) } - async fn read(&self, path: &str, block: Option) -> ObjectResult { + async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { fail_point!("mem_read_err", |_| Err(ObjectError::internal( "mem read error" ))); - if let Some(loc) = block { - self.get_object(path, |obj| find_block(obj, loc)).await? - } else { - self.get_object(path, |obj| Ok(obj.clone())).await? - } - } - - async fn readv(&self, path: &str, block_locs: &[BlockLocation]) -> ObjectResult> { - let futures = block_locs - .iter() - .map(|block_loc| self.read(path, Some(*block_loc))) - .collect_vec(); - try_join_all(futures).await + self.get_object(path, range).await } /// Returns a stream reading the object specified in `path`. 
If given, the stream starts at the @@ -160,23 +148,10 @@ impl ObjectStore for InMemObjectStore { fail_point!("mem_streaming_read_err", |_| Err(ObjectError::internal( "mem streaming read error" ))); - - let bytes = if let Some(pos) = start_pos { - self.get_object(path, |obj| { - find_block( - obj, - BlockLocation { - offset: pos, - size: obj.len() - pos, - }, - ) - }) - .await? - } else { - self.get_object(path, |obj| Ok(obj.clone())).await? - }; - - Ok(Box::new(Cursor::new(bytes?))) + let bytes = self + .get_object(path, start_pos.unwrap_or_default()..) + .await?; + Ok(Box::new(Cursor::new(bytes))) } async fn metadata(&self, path: &str) -> ObjectResult { @@ -254,25 +229,19 @@ impl InMemObjectStore { *SHARED.lock() = InMemObjectStore::new(); } - async fn get_object(&self, path: &str, f: F) -> ObjectResult - where - F: Fn(&Bytes) -> R, - { - self.objects - .lock() - .await + async fn get_object(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { + let objects = self.objects.lock().await; + + let obj = objects .get(path) .map(|(_, obj)| obj) - .ok_or_else(|| Error::not_found(format!("no object at path '{}'", path)).into()) - .map(f) - } -} + .ok_or_else(|| Error::not_found(format!("no object at path '{}'", path)))?; + + if let Some(end) = range.end() && end > obj.len() { + return Err(Error::other("bad block offset and size").into()); + } -fn find_block(obj: &Bytes, block: BlockLocation) -> ObjectResult { - if block.offset + block.size > obj.len() { - Err(Error::other("bad block offset and size").into()) - } else { - Ok(obj.slice(block.offset..(block.offset + block.size))) + Ok(obj.slice(range)) } } @@ -326,29 +295,19 @@ mod tests { s3.upload("/abc", block).await.unwrap(); // No such object. 
- let err = s3 - .read("/ab", Some(BlockLocation { offset: 0, size: 3 })) - .await - .unwrap_err(); + let err = s3.read("/ab", 0..3).await.unwrap_err(); assert!(err.is_object_not_found_error()); - let bytes = s3 - .read("/abc", Some(BlockLocation { offset: 4, size: 2 })) - .await - .unwrap(); + let bytes = s3.read("/abc", 4..6).await.unwrap(); assert_eq!(String::from_utf8(bytes.to_vec()).unwrap(), "56".to_string()); // Overflow. - s3.read("/abc", Some(BlockLocation { offset: 4, size: 4 })) - .await - .unwrap_err(); + s3.read("/abc", 4..8).await.unwrap_err(); s3.delete("/abc").await.unwrap(); // No such object. - s3.read("/abc", Some(BlockLocation { offset: 0, size: 3 })) - .await - .unwrap_err(); + s3.read("/abc", 0..3).await.unwrap_err(); } #[tokio::test] @@ -365,14 +324,11 @@ mod tests { uploader.finish().await.unwrap(); // Read whole object. - let read_obj = store.read("/abc", None).await.unwrap(); + let read_obj = store.read("/abc", ..).await.unwrap(); assert!(read_obj.eq(&obj)); // Read part of the object. - let read_obj = store - .read("/abc", Some(BlockLocation { offset: 4, size: 2 })) - .await - .unwrap(); + let read_obj = store.read("/abc", 4..6).await.unwrap(); assert_eq!( String::from_utf8(read_obj.to_vec()).unwrap(), "56".to_string() diff --git a/src/object_store/src/object/mod.rs b/src/object_store/src/object/mod.rs index 0fba8dadda474..38b826920d158 100644 --- a/src/object_store/src/object/mod.rs +++ b/src/object_store/src/object/mod.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::RangeBounds; use std::sync::Arc; use std::time::Duration; @@ -41,6 +42,8 @@ pub type ObjectStreamingUploader = MonitoredStreamingUploader; type BoxedStreamingUploader = Box; +pub trait ObjectRangeBounds = RangeBounds + Clone + Send + Sync + std::fmt::Debug + 'static; + /// Partitions a set of given paths into two vectors. 
The first vector contains all local paths, and /// the second contains all remote paths. fn partition_object_store_paths(paths: &[String]) -> Vec { @@ -55,12 +58,6 @@ fn partition_object_store_paths(paths: &[String]) -> Vec { vec_rem } -#[derive(Debug, Copy, Clone)] -pub struct BlockLocation { - pub offset: usize, - pub size: usize, -} - #[derive(Debug, Clone, PartialEq)] pub struct ObjectMetadata { // Full path @@ -70,17 +67,6 @@ pub struct ObjectMetadata { pub total_size: usize, } -impl BlockLocation { - /// Generates the http bytes range specifier. - pub fn byte_range_specifier(&self) -> Option { - Some(format!( - "bytes={}-{}", - self.offset, - self.offset + self.size - 1 - )) - } -} - #[async_trait::async_trait] pub trait StreamingUploader: Send { async fn write_bytes(&mut self, data: Bytes) -> ObjectResult<()>; @@ -101,13 +87,10 @@ pub trait ObjectStore: Send + Sync { async fn streaming_upload(&self, path: &str) -> ObjectResult; - /// If the `block_loc` is None, the whole object will be returned. /// If objects are PUT using a multipart upload, it's a good practice to GET them in the same /// part sizes (or at least aligned to part boundaries) for best performance. /// - async fn read(&self, path: &str, block_loc: Option) -> ObjectResult; - - async fn readv(&self, path: &str, block_locs: &[BlockLocation]) -> ObjectResult>; + async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult; /// Returns a stream reading the object specified in `path`. If given, the stream starts at the /// byte with index `start_pos` (0-based). 
As far as possible, the stream only loads the amount @@ -208,16 +191,8 @@ impl ObjectStoreImpl { object_store_impl_method_body!(self, streaming_upload, dispatch_async, path) } - pub async fn read(&self, path: &str, block_loc: Option) -> ObjectResult { - object_store_impl_method_body!(self, read, dispatch_async, path, block_loc) - } - - pub async fn readv( - &self, - path: &str, - block_locs: &[BlockLocation], - ) -> ObjectResult> { - object_store_impl_method_body!(self, readv, dispatch_async, path, block_locs) + pub async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { + object_store_impl_method_body!(self, read, dispatch_async, path, range) } pub async fn metadata(&self, path: &str) -> ObjectResult { @@ -626,7 +601,7 @@ impl MonitoredObjectStore { )) } - pub async fn read(&self, path: &str, block_loc: Option) -> ObjectResult { + pub async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { let operation_type = "read"; let _timer = self .object_store_metrics @@ -635,7 +610,7 @@ impl MonitoredObjectStore { .start_timer(); let future = async { self.inner - .read(path, block_loc) + .read(path, range) .verbose_instrument_await("object_store_read") .await }; @@ -659,43 +634,6 @@ impl MonitoredObjectStore { Ok(data) } - pub async fn readv( - &self, - path: &str, - block_locs: &[BlockLocation], - ) -> ObjectResult> { - let operation_type = "readv"; - let _timer = self - .object_store_metrics - .operation_latency - .with_label_values(&[self.media_type(), operation_type]) - .start_timer(); - - let future = async { - self.inner - .readv(path, block_locs) - .verbose_instrument_await("object_store_readv") - .await - }; - let res = match self.read_timeout.as_ref() { - None => future.await, - Some(timeout) => tokio::time::timeout(*timeout, future) - .await - .unwrap_or_else(|_| Err(ObjectError::internal("readv timeout"))), - }; - - try_update_failure_metric(&self.object_store_metrics, &res, operation_type); - - let data = res?; 
- let data_len = data.iter().map(|block| block.len()).sum::() as u64; - self.object_store_metrics.read_bytes.inc_by(data_len); - self.object_store_metrics - .operation_size - .with_label_values(&[operation_type]) - .observe(data_len as f64); - Ok(data) - } - /// Returns a stream reading the object specified in `path`. If given, the stream starts at the /// byte with index `start_pos` (0-based). As far as possible, the stream only loads the amount /// of data into memory that is read from the stream. @@ -949,7 +887,7 @@ pub async fn parse_remote_object_store( } other => { unimplemented!( - "{} remote object store only supports s3, minio, disk, memory, and memory-shared for now.", + "{} remote object store only supports s3, minio, gcs, oss, cos, azure blob, hdfs, disk, memory, and memory-shared.", other ) } diff --git a/src/object_store/src/object/opendal_engine/opendal_object_store.rs b/src/object_store/src/object/opendal_engine/opendal_object_store.rs index de410507da622..b829dbd544abf 100644 --- a/src/object_store/src/object/opendal_engine/opendal_object_store.rs +++ b/src/object_store/src/object/opendal_engine/opendal_object_store.rs @@ -17,15 +17,15 @@ use std::task::{ready, Context, Poll}; use bytes::Bytes; use fail::fail_point; -use futures::future::{try_join_all, BoxFuture}; +use futures::future::BoxFuture; use futures::{FutureExt, Stream, StreamExt}; -use itertools::Itertools; use opendal::services::Memory; use opendal::{Entry, Error, Lister, Metakey, Operator, Writer}; +use risingwave_common::range::RangeBoundsExt; use tokio::io::AsyncRead; use crate::object::{ - BlockLocation, BoxedStreamingUploader, ObjectError, ObjectMetadata, ObjectMetadataIter, + BoxedStreamingUploader, ObjectError, ObjectMetadata, ObjectMetadataIter, ObjectRangeBounds, ObjectResult, ObjectStore, StreamingUploader, }; @@ -80,28 +80,24 @@ impl ObjectStore for OpendalObjectStore { )) } - async fn read(&self, path: &str, block: Option) -> ObjectResult { - match block { - Some(block) => { 
- let range = block.offset as u64..(block.offset + block.size) as u64; - let res = Bytes::from(self.op.range_read(path, range).await?); + async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { + let data = if range.is_full() { + self.op.read(path).await? + } else { + self.op.range_read(path, range.map(|v| *v as u64)).await? + }; - if block.size != res.len() { - Err(ObjectError::internal("bad block offset and size")) - } else { - Ok(res) - } - } - None => Ok(Bytes::from(self.op.read(path).await?)), + if let Some(len) = range.len() && len != data.len() { + return Err(ObjectError::internal(format!( + "mismatched size: expected {}, found {} when reading {} at {:?}", + len, + data.len(), + path, + range, + ))); } - } - async fn readv(&self, path: &str, block_locs: &[BlockLocation]) -> ObjectResult> { - let futures = block_locs - .iter() - .map(|block_loc| self.read(path, Some(*block_loc))) - .collect_vec(); - try_join_all(futures).await + Ok(Bytes::from(data)) } /// Returns a stream reading the object specified in `path`. If given, the stream starts at the @@ -314,13 +310,6 @@ mod tests { result } - fn gen_test_payload() -> Vec { - let mut ret = Vec::new(); - for i in 0..100000 { - ret.extend(format!("{:05}", i).as_bytes()); - } - ret - } #[tokio::test] async fn test_memory_upload() { let block = Bytes::from("123456"); @@ -328,36 +317,18 @@ mod tests { store.upload("/abc", block).await.unwrap(); // No such object. - store - .read("/ab", Some(BlockLocation { offset: 0, size: 3 })) - .await - .unwrap_err(); - - let bytes = store - .read("/abc", Some(BlockLocation { offset: 4, size: 2 })) - .await - .unwrap(); + store.read("/ab", 0..3).await.unwrap_err(); + + let bytes = store.read("/abc", 4..6).await.unwrap(); assert_eq!(String::from_utf8(bytes.to_vec()).unwrap(), "56".to_string()); // Overflow. 
- store - .read( - "/abc", - Some(BlockLocation { - offset: 4, - size: 40, - }), - ) - .await - .unwrap_err(); + store.read("/abc", 4..44).await.unwrap_err(); store.delete("/abc").await.unwrap(); // No such object. - store - .read("/abc", Some(BlockLocation { offset: 0, size: 3 })) - .await - .unwrap_err(); + store.read("/abc", 0..3).await.unwrap_err(); } #[tokio::test] @@ -389,40 +360,9 @@ mod tests { store.delete_objects(&str_list).await.unwrap(); - assert!(store.read("prefix/abc/", None).await.is_err()); - assert!(store.read("prefix/xyz/", None).await.is_err()); + assert!(store.read("prefix/abc/", ..).await.is_err()); + assert!(store.read("prefix/xyz/", ..).await.is_err()); assert_eq!(list_all("", &store).await.len(), 1); assert_eq!(list_all("prefix/", &store).await.len(), 0); } - - #[tokio::test] - async fn test_memory_read_multi_block() { - let store = OpendalObjectStore::new_memory_engine().unwrap(); - let payload = gen_test_payload(); - store - .upload("test.obj", Bytes::from(payload.clone())) - .await - .unwrap(); - let metadata = store.metadata("test.obj").await.unwrap(); - assert_eq!(payload.len(), metadata.total_size); - let test_loc = [(0, 1000), (10000, 1000), (20000, 1000)]; - let read_data = store - .readv( - "test.obj", - &test_loc - .iter() - .map(|(offset, size)| BlockLocation { - offset: *offset, - size: *size, - }) - .collect_vec(), - ) - .await - .unwrap(); - assert_eq!(test_loc.len(), read_data.len()); - for (i, (offset, size)) in test_loc.iter().enumerate() { - assert_eq!(&payload[*offset..(*offset + *size)], &read_data[i][..]); - } - store.delete("test.obj").await.unwrap(); - } } diff --git a/src/object_store/src/object/s3.rs b/src/object_store/src/object/s3.rs index e0d561e3bdb32..69e7f3687fdeb 100644 --- a/src/object_store/src/object/s3.rs +++ b/src/object_store/src/object/s3.rs @@ -41,13 +41,14 @@ use hyper::Body; use itertools::Itertools; use risingwave_common::config::default::s3_objstore_config; use 
risingwave_common::monitor::connection::monitor_connector; +use risingwave_common::range::RangeBoundsExt; use tokio::io::AsyncRead; use tokio::task::JoinHandle; use tokio_retry::strategy::{jitter, ExponentialBackoff}; use super::object_metrics::ObjectStoreMetrics; use super::{ - BlockLocation, BoxedStreamingUploader, Bytes, ObjectError, ObjectMetadata, ObjectResult, + BoxedStreamingUploader, Bytes, ObjectError, ObjectMetadata, ObjectRangeBounds, ObjectResult, ObjectStore, StreamingUploader, }; use crate::object::{try_update_failure_metric, ObjectMetadataIter}; @@ -347,29 +348,16 @@ impl ObjectStore for S3ObjectStore { } /// Amazon S3 doesn't support retrieving multiple ranges of data per GET request. - async fn read(&self, path: &str, block_loc: Option) -> ObjectResult { + async fn read(&self, path: &str, range: impl ObjectRangeBounds) -> ObjectResult { fail_point!("s3_read_err", |_| Err(ObjectError::internal( "s3 read error" ))); - let (start_pos, end_pos) = block_loc.as_ref().map_or((None, None), |block_loc| { - ( - Some(block_loc.offset), - Some( - block_loc.offset + block_loc.size - 1, // End is inclusive. - ), - ) - }); - // retry if occurs AWS EC2 HTTP timeout error. 
let resp = tokio_retry::RetryIf::spawn( self.config.get_retry_strategy(), || async { - match self - .obj_store_request(path, start_pos, end_pos) - .send() - .await - { + match self.obj_store_request(path, range.clone()).send().await { Ok(resp) => Ok(resp), Err(err) => { if let SdkError::DispatchFailure(e) = &err @@ -391,24 +379,17 @@ impl ObjectStore for S3ObjectStore { let val = resp.body.collect().await?.into_bytes(); - if block_loc.is_some() && block_loc.as_ref().unwrap().size != val.len() { + if let Some(len) = range.len() && len != val.len() { return Err(ObjectError::internal(format!( "mismatched size: expected {}, found {} when reading {} at {:?}", - block_loc.as_ref().unwrap().size, + len, val.len(), path, - block_loc.as_ref().unwrap() + range, ))); } - Ok(val) - } - async fn readv(&self, path: &str, block_locs: &[BlockLocation]) -> ObjectResult> { - let futures = block_locs - .iter() - .map(|block_loc| self.read(path, Some(*block_loc))) - .collect_vec(); - try_join_all(futures).await + Ok(val) } async fn metadata(&self, path: &str) -> ObjectResult { @@ -448,7 +429,11 @@ impl ObjectStore for S3ObjectStore { let resp = tokio_retry::RetryIf::spawn( self.config.get_retry_strategy(), || async { - match self.obj_store_request(path, start_pos, None).send().await { + match self + .obj_store_request(path, start_pos.unwrap_or_default()..) 
+ .send() + .await + { Ok(resp) => Ok(resp), Err(err) => { if let SdkError::DispatchFailure(e) = &err @@ -631,7 +616,16 @@ impl S3ObjectStore { pub async fn with_minio(server: &str, metrics: Arc) -> Self { let server = server.strip_prefix("minio://").unwrap(); let (access_key_id, rest) = server.split_once(':').unwrap(); - let (secret_access_key, rest) = rest.split_once('@').unwrap(); + let (secret_access_key, mut rest) = rest.split_once('@').unwrap(); + let endpoint_prefix = if let Some(rest_stripped) = rest.strip_prefix("https://") { + rest = rest_stripped; + "https://" + } else if let Some(rest_stripped) = rest.strip_prefix("http://") { + rest = rest_stripped; + "http://" + } else { + "http://" + }; let (address, bucket) = rest.split_once('/').unwrap(); #[cfg(madsim)] @@ -641,10 +635,9 @@ impl S3ObjectStore { aws_sdk_s3::config::Builder::from(&aws_config::ConfigLoader::default().load().await) .force_path_style(true) .http_connector(Self::new_http_connector(&S3ObjectStoreConfig::default())); - let config = builder .region(Region::new("custom")) - .endpoint_url(format!("http://{}", address)) + .endpoint_url(format!("{}{}", endpoint_prefix, address)) .credentials_provider(Credentials::from_keys( access_key_id, secret_access_key, @@ -675,25 +668,17 @@ impl S3ObjectStore { fn obj_store_request( &self, path: &str, - start_pos: Option, - end_pos: Option, + range: impl ObjectRangeBounds, ) -> GetObjectFluentBuilder { let req = self.client.get_object().bucket(&self.bucket).key(path); - - match (start_pos, end_pos) { - (None, None) => { - // No range is given. Return request as is. - req - } - _ => { - // At least one boundary is given. Return request with range limitation. 
- req.range(format!( - "bytes={}-{}", - start_pos.map_or(String::new(), |pos| pos.to_string()), - end_pos.map_or(String::new(), |pos| pos.to_string()) - )) - } + if range.is_full() { + return req; } + + let start = range.start().map(|v| v.to_string()).unwrap_or_default(); + let end = range.end().map(|v| (v - 1).to_string()).unwrap_or_default(); // included + + req.range(format!("bytes={}-{}", start, end)) } // When multipart upload is aborted, if any part uploads are in progress, those part uploads @@ -711,7 +696,7 @@ impl S3ObjectStore { /// - /// - MinIO /// - - pub async fn configure_bucket_lifecycle(&self) { + pub async fn configure_bucket_lifecycle(&self) -> bool { // Check if lifecycle is already configured to avoid overriding existing configuration. let bucket = self.bucket.as_str(); let mut configured_rules = vec![]; @@ -721,8 +706,12 @@ impl S3ObjectStore { .bucket(bucket) .send() .await; + let mut is_expiration_configured = false; if let Ok(config) = &get_config_result { for rule in config.rules().unwrap_or_default() { + if rule.expiration().is_some() { + is_expiration_configured = true; + } if matches!(rule.status().unwrap(), ExpirationStatus::Enabled) && rule.abort_incomplete_multipart_upload().is_some() { @@ -769,6 +758,13 @@ impl S3ObjectStore { tracing::warn!("Failed to configure life cycle rule for S3 bucket: {:?}. 
It is recommended to configure it manually to avoid unnecessary storage cost.", bucket); } } + if is_expiration_configured { + tracing::info!( + "S3 bucket {} has already configured the expiration for the lifecycle.", + bucket, + ); + } + is_expiration_configured } #[inline(always)] diff --git a/src/prost/Cargo.toml b/src/prost/Cargo.toml index a1acb61dc86d8..d373207966640 100644 --- a/src/prost/Cargo.toml +++ b/src/prost/Cargo.toml @@ -9,10 +9,11 @@ repository = { workspace = true } [dependencies] enum-as-inner = "0.6" -pbjson = "0.5" -prost = "0.11" +pbjson = "0.6" +prost = { workspace = true } prost-helpers = { path = "helpers" } serde = { version = "1", features = ["derive"] } +strum = "0.25" tonic = { workspace = true } [target.'cfg(not(madsim))'.dependencies] diff --git a/src/prost/build.rs b/src/prost/build.rs index 6fac20950b1fa..12476f60b9ac0 100644 --- a/src/prost/build.rs +++ b/src/prost/build.rs @@ -58,6 +58,9 @@ fn main() -> Result<(), Box> { .map(|f| format!("{}/{}.proto", proto_dir, f)) .collect(); + // Paths to generate `BTreeMap` for protobuf maps. + let btree_map_paths = [".monitor_service.StackTraceResponse"]; + // Build protobuf structs. // We first put generated files to `OUT_DIR`, then copy them to `/src` only if they are changed. @@ -72,7 +75,10 @@ fn main() -> Result<(), Box> { .compile_well_known_types(true) .protoc_arg("--experimental_allow_proto3_optional") .type_attribute(".", "#[derive(prost_helpers::AnyPB)]") - .type_attribute("node_body", "#[derive(::enum_as_inner::EnumAsInner)]") + .type_attribute( + "node_body", + "#[derive(::enum_as_inner::EnumAsInner, ::strum::Display)]", + ) .type_attribute("rex_node", "#[derive(::enum_as_inner::EnumAsInner)]") .type_attribute( "meta.PausedReason", @@ -82,6 +88,7 @@ fn main() -> Result<(), Box> { "stream_plan.Barrier.BarrierKind", "#[derive(::enum_as_inner::EnumAsInner)]", ) + .btree_map(btree_map_paths) // Eq + Hash are for plan nodes to do common sub-plan detection. 
// The requirement is from Source node -> SourceCatalog -> WatermarkDesc -> expr .type_attribute("catalog.WatermarkDesc", "#[derive(Eq, Hash)]") @@ -116,6 +123,7 @@ fn main() -> Result<(), Box> { // Implement `serde::Serialize` on those structs. let descriptor_set = fs_err::read(file_descriptor_set_path)?; pbjson_build::Builder::new() + .btree_map(btree_map_paths) .register_descriptors(&descriptor_set)? .out_dir(out_dir.as_path()) .build(&["."]) diff --git a/src/prost/helpers/Cargo.toml b/src/prost/helpers/Cargo.toml index 50d9b4febd80b..c78ac3f2a8ece 100644 --- a/src/prost/helpers/Cargo.toml +++ b/src/prost/helpers/Cargo.toml @@ -11,9 +11,6 @@ proc-macro2 = { version = "1", default-features = false } quote = "1" syn = "2" -[target.'cfg(not(madsim))'.dependencies] -workspace-hack = { path = "../../workspace-hack" } - [package.metadata.cargo-machete] ignored = ["workspace-hack"] diff --git a/src/prost/helpers/src/lib.rs b/src/prost/helpers/src/lib.rs index f4d1d1a45baa1..5796e14273fe9 100644 --- a/src/prost/helpers/src/lib.rs +++ b/src/prost/helpers/src/lib.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] #![feature(iterator_try_collect)] use proc_macro::TokenStream; @@ -24,7 +24,7 @@ mod generate; /// This attribute will be placed before any pb types, including messages and enums. /// See `prost/helpers/README.md` for more details. -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] #[proc_macro_derive(AnyPB)] pub fn any_pb(input: TokenStream) -> TokenStream { // Parse the string representation @@ -37,7 +37,7 @@ pub fn any_pb(input: TokenStream) -> TokenStream { } // Procedure macros can not be tested from the same crate. 
-#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] fn produce(ast: &DeriveInput) -> Result { let name = &ast.ident; diff --git a/src/risedevtool/common.toml b/src/risedevtool/common.toml index a664f13635a08..9e0b30ae7e561 100644 --- a/src/risedevtool/common.toml +++ b/src/risedevtool/common.toml @@ -1,5 +1,6 @@ [env] RISEDEV = "1" +RUST_BACKTRACE = "1" OS = { source = "${CARGO_MAKE_RUST_TARGET_OS}", mapping = { linux = "linux", macos = "darwin" } } ARCH = { source = "${CARGO_MAKE_RUST_TARGET_ARCH}", mapping = { x86_64 = "amd64", aarch64 = "arm64" } } SYSTEM = "${OS}-${ARCH}" diff --git a/src/risedevtool/config/Cargo.toml b/src/risedevtool/config/Cargo.toml index 441742e3c2b6c..e33eb0afd1647 100644 --- a/src/risedevtool/config/Cargo.toml +++ b/src/risedevtool/config/Cargo.toml @@ -11,7 +11,7 @@ repository = { workspace = true } anyhow = { version = "1", features = ["backtrace"] } clap = { version = "4", features = ["derive"] } console = "0.15" -dialoguer = "0.10" +dialoguer = "0.11" enum-iterator = "1" fs-err = "2.9.0" itertools = "0.11" diff --git a/src/risedevtool/config/src/main.rs b/src/risedevtool/config/src/main.rs index 2b1a4968d6195..79df34bd815dc 100644 --- a/src/risedevtool/config/src/main.rs +++ b/src/risedevtool/config/src/main.rs @@ -73,6 +73,7 @@ pub enum Components { Sanitizer, DynamicLinking, HummockTrace, + Coredump, } impl Components { @@ -87,13 +88,14 @@ impl Components { Self::Redis => "[Component] Redis", Self::BuildConnectorNode => "[Build] Build RisingWave Connector (Java)", Self::RustComponents => "[Build] Rust components", - Self::Dashboard => "[Build] Dashboard v2", + Self::Dashboard => "[Build] Dashboard", Self::Tracing => "[Component] Tracing: Grafana Tempo", Self::Release => "[Build] Enable release mode", Self::AllInOne => "[Build] Enable all-in-one binary", Self::Sanitizer => "[Build] Enable sanitizer", Self::DynamicLinking => "[Build] Enable dynamic linking", Self::HummockTrace => "[Build] Hummock Trace", + 
Self::Coredump => "[Runtime] Enable coredump", } .into() } @@ -135,7 +137,7 @@ to RiseDev directory." } Self::Dashboard => { " -Required if you want to build dashboard v2 from source." +Required if you want to build dashboard from source." } Self::Tracing => { " @@ -179,7 +181,18 @@ but you might need the expertise to install dependencies correctly. " } Self::HummockTrace => { - "With this option enabled, RiseDev will enable tracing for Hummock. See storage/hummock_trace for details." + " +With this option enabled, RiseDev will enable tracing for Hummock. +See storage/hummock_trace for details. + " + } + Self::Coredump => { + " +With this option enabled, RiseDev will unlimit the size of core +files before launching RisingWave. On Apple Silicon platforms, +the binaries will also be codesigned with `get-task-allow` enabled. +As a result, RisingWave will dump the core on panics. + " } } .into() @@ -194,7 +207,7 @@ but you might need the expertise to install dependencies correctly. "ENABLE_KAFKA" => Some(Self::Kafka), "ENABLE_PUBSUB" => Some(Self::Pubsub), "ENABLE_BUILD_RUST" => Some(Self::RustComponents), - "ENABLE_BUILD_DASHBOARD_V2" => Some(Self::Dashboard), + "ENABLE_BUILD_DASHBOARD" => Some(Self::Dashboard), "ENABLE_COMPUTE_TRACING" => Some(Self::Tracing), "ENABLE_RELEASE_PROFILE" => Some(Self::Release), "ENABLE_DYNAMIC_LINKING" => Some(Self::DynamicLinking), @@ -217,7 +230,7 @@ but you might need the expertise to install dependencies correctly. Self::Pubsub => "ENABLE_PUBSUB", Self::Redis => "ENABLE_REDIS", Self::RustComponents => "ENABLE_BUILD_RUST", - Self::Dashboard => "ENABLE_BUILD_DASHBOARD_V2", + Self::Dashboard => "ENABLE_BUILD_DASHBOARD", Self::Tracing => "ENABLE_COMPUTE_TRACING", Self::Release => "ENABLE_RELEASE_PROFILE", Self::AllInOne => "ENABLE_ALL_IN_ONE", @@ -225,6 +238,7 @@ but you might need the expertise to install dependencies correctly. 
Self::BuildConnectorNode => "ENABLE_BUILD_RW_CONNECTOR", Self::DynamicLinking => "ENABLE_DYNAMIC_LINKING", Self::HummockTrace => "ENABLE_HUMMOCK_TRACE", + Self::Coredump => "ENABLE_COREDUMP", } .into() } diff --git a/src/risedevtool/src/bin/risedev-compose.rs b/src/risedevtool/src/bin/risedev-compose.rs index 087c6519717f1..63925d919bb2b 100644 --- a/src/risedevtool/src/bin/risedev-compose.rs +++ b/src/risedevtool/src/bin/risedev-compose.rs @@ -222,7 +222,6 @@ fn main() -> Result<()> { (c.address.clone(), c.compose(&compose_config)?) } ServiceConfig::Redis(_) => return Err(anyhow!("not supported")), - ServiceConfig::ConnectorNode(_) => return Err(anyhow!("not supported")), }; compose.container_name = service.id().to_string(); if opts.deploy { diff --git a/src/risedevtool/src/bin/risedev-dev.rs b/src/risedevtool/src/bin/risedev-dev.rs index c2e586802489b..474e8dd0cbd15 100644 --- a/src/risedevtool/src/bin/risedev-dev.rs +++ b/src/risedevtool/src/bin/risedev-dev.rs @@ -25,10 +25,10 @@ use indicatif::ProgressBar; use risedev::util::{complete_spin, fail_spin}; use risedev::{ generate_risedev_env, preflight_check, AwsS3Config, CompactorService, ComputeNodeService, - ConfigExpander, ConfigureTmuxTask, ConnectorNodeService, EnsureStopService, ExecuteContext, - FrontendService, GrafanaService, KafkaService, MetaNodeService, MinioService, OpendalConfig, - PrometheusService, PubsubService, RedisService, ServiceConfig, Task, TempoService, - ZooKeeperService, RISEDEV_SESSION_NAME, + ConfigExpander, ConfigureTmuxTask, EnsureStopService, ExecuteContext, FrontendService, + GrafanaService, KafkaService, MetaNodeService, MinioService, OpendalConfig, PrometheusService, + PubsubService, RedisService, ServiceConfig, Task, TempoService, ZooKeeperService, + RISEDEV_SESSION_NAME, }; use tempfile::tempdir; use yaml_rust::YamlEmitter; @@ -114,7 +114,6 @@ fn task_main( ServiceConfig::AwsS3(_) => None, ServiceConfig::OpenDal(_) => None, ServiceConfig::RedPanda(_) => None, - 
ServiceConfig::ConnectorNode(c) => Some((c.port, c.id.clone())), }; if let Some(x) = listen_info { @@ -339,17 +338,6 @@ fn task_main( ctx.pb .set_message(format!("redis {}:{}", c.address, c.port)); } - ServiceConfig::ConnectorNode(c) => { - let mut ctx = - ExecuteContext::new(&mut logger, manager.new_progress(), status_dir.clone()); - let mut service = ConnectorNodeService::new(c.clone())?; - service.execute(&mut ctx)?; - let mut task = - risedev::ConfigureGrpcNodeTask::new(c.address.clone(), c.port, false)?; - task.execute(&mut ctx)?; - ctx.pb - .set_message(format!("connector grpc://{}:{}", c.address, c.port)); - } } let service_id = service.id().to_string(); diff --git a/src/risedevtool/src/bin/risedev-docslt.rs b/src/risedevtool/src/bin/risedev-docslt.rs index b9e5e70d6e5f8..6a76ed1035959 100644 --- a/src/risedevtool/src/bin/risedev-docslt.rs +++ b/src/risedevtool/src/bin/risedev-docslt.rs @@ -112,10 +112,7 @@ fn main() -> Result<()> { # This file is generated from `{}` at {}.\n\ \n\ statement ok\n\ - set RW_IMPLICIT_FLUSH to true;\n\ - \n\ - statement ok\n\ - set CREATE_COMPACTION_GROUP_FOR_MV to true;\n", + set RW_IMPLICIT_FLUSH to true;\n", path.display(), chrono::Utc::now() )?; diff --git a/src/risedevtool/src/config.rs b/src/risedevtool/src/config.rs index fe7d677a6a765..09e530487d4f0 100644 --- a/src/risedevtool/src/config.rs +++ b/src/risedevtool/src/config.rs @@ -171,9 +171,6 @@ impl ConfigExpander { "kafka" => ServiceConfig::Kafka(serde_yaml::from_str(&out_str)?), "pubsub" => ServiceConfig::Pubsub(serde_yaml::from_str(&out_str)?), "redis" => ServiceConfig::Redis(serde_yaml::from_str(&out_str)?), - "connector-node" => { - ServiceConfig::ConnectorNode(serde_yaml::from_str(&out_str)?) 
- } "zookeeper" => ServiceConfig::ZooKeeper(serde_yaml::from_str(&out_str)?), "redpanda" => ServiceConfig::RedPanda(serde_yaml::from_str(&out_str)?), other => return Err(anyhow!("unsupported use type: {}", other)), diff --git a/src/risedevtool/src/config_gen/prometheus_gen.rs b/src/risedevtool/src/config_gen/prometheus_gen.rs index aa6422416a31f..2143031f1ba21 100644 --- a/src/risedevtool/src/config_gen/prometheus_gen.rs +++ b/src/risedevtool/src/config_gen/prometheus_gen.rs @@ -79,14 +79,6 @@ impl PrometheusGen { .map(|node| format!("\"{}:{}\"", node.address, 9644)) .join(","); - let connector_node_targets = config - .provide_connector_node - .as_ref() - .unwrap() - .iter() - .map(|node| format!("\"{}:{}\"", node.address, node.exporter_port)) - .join(","); - let now = Local::now().format("%Y%m%d-%H%M%S"); let remote_write = if config.remote_write { @@ -151,10 +143,6 @@ scrape_configs: - job_name: redpanda static_configs: - targets: [{redpanda_targets}] - - - job_name: connector-node - static_configs: - - targets: [{connector_node_targets}] "#, ) } diff --git a/src/risedevtool/src/risedev_env.rs b/src/risedevtool/src/risedev_env.rs index 20b5fa97dae34..2ab3e350165f5 100644 --- a/src/risedevtool/src/risedev_env.rs +++ b/src/risedevtool/src/risedev_env.rs @@ -19,8 +19,9 @@ use std::process::Command; use crate::{add_hummock_backend, HummockInMemoryStrategy, ServiceConfig}; -/// Generate environment variables from the given service configurations to be used by future -/// RiseDev commands, like `risedev ctl` or `risedev psql`. +/// Generate environment variables (put in file `.risingwave/config/risedev-env`) +/// from the given service configurations to be used by future +/// RiseDev commands, like `risedev ctl` or `risedev psql` (). 
pub fn generate_risedev_env(services: &Vec) -> String { let mut env = String::new(); for item in services { diff --git a/src/risedevtool/src/service_config.rs b/src/risedevtool/src/service_config.rs index 8890f984971fe..516ae872d6c31 100644 --- a/src/risedevtool/src/service_config.rs +++ b/src/risedevtool/src/service_config.rs @@ -37,7 +37,6 @@ pub struct ComputeNodeConfig { pub provide_aws_s3: Option>, pub provide_tempo: Option>, pub user_managed: bool, - pub connector_rpc_endpoint: String, pub total_memory_bytes: usize, pub parallelism: usize, @@ -61,7 +60,6 @@ pub struct MetaNodeConfig { pub user_managed: bool, - pub connector_rpc_endpoint: String, pub provide_etcd_backend: Option>, pub provide_prometheus: Option>, @@ -190,7 +188,6 @@ pub struct PrometheusConfig { pub provide_etcd: Option>, pub provide_redpanda: Option>, pub provide_frontend: Option>, - pub provide_connector_node: Option>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -318,18 +315,6 @@ pub struct RedisConfig { pub address: String, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -#[serde(deny_unknown_fields)] -pub struct ConnectorNodeConfig { - #[serde(rename = "use")] - phantom_use: Option, - pub id: String, - pub port: u16, - pub exporter_port: u16, - pub address: String, -} - /// All service configuration #[derive(Clone, Debug, PartialEq)] pub enum ServiceConfig { @@ -349,7 +334,6 @@ pub enum ServiceConfig { Redis(RedisConfig), ZooKeeper(ZooKeeperConfig), RedPanda(RedPandaConfig), - ConnectorNode(ConnectorNodeConfig), } impl ServiceConfig { @@ -370,7 +354,6 @@ impl ServiceConfig { Self::Pubsub(c) => &c.id, Self::Redis(c) => &c.id, Self::RedPanda(c) => &c.id, - Self::ConnectorNode(c) => &c.id, Self::OpenDal(c) => &c.id, } } diff --git a/src/risedevtool/src/task.rs b/src/risedevtool/src/task.rs index 262a68c52cb9a..a2e4ec9bf46dc 100644 --- a/src/risedevtool/src/task.rs +++ b/src/risedevtool/src/task.rs @@ -15,7 +15,6 @@ mod 
compactor_service; mod compute_node_service; mod configure_tmux_service; -mod connector_service; mod ensure_stop_service; mod etcd_service; mod frontend_service; @@ -52,7 +51,6 @@ pub use utils::*; pub use self::compactor_service::*; pub use self::compute_node_service::*; pub use self::configure_tmux_service::*; -pub use self::connector_service::*; pub use self::ensure_stop_service::*; pub use self::etcd_service::*; pub use self::frontend_service::*; diff --git a/src/risedevtool/src/task/compactor_service.rs b/src/risedevtool/src/task/compactor_service.rs index d94083745154e..adecc007b8207 100644 --- a/src/risedevtool/src/task/compactor_service.rs +++ b/src/risedevtool/src/task/compactor_service.rs @@ -53,9 +53,7 @@ impl CompactorService { config.listen_address, config.exporter_port )) .arg("--advertise-addr") - .arg(format!("{}:{}", config.address, config.port)) - .arg("--metrics-level") - .arg("info"); + .arg(format!("{}:{}", config.address, config.port)); if let Some(compaction_worker_threads_number) = config.compaction_worker_threads_number.as_ref() { @@ -84,8 +82,6 @@ impl Task for CompactorService { cmd.env("RUST_BACKTRACE", "1"); - // FIXME: Otherwise, CI will throw log size too large error - // cmd.env("RW_QUERY_LOG_PATH", DEFAULT_QUERY_LOG_PATH); if crate::util::is_env_set("RISEDEV_ENABLE_PROFILE") { cmd.env( "RW_PROFILE_PATH", @@ -95,10 +91,9 @@ impl Task for CompactorService { if crate::util::is_env_set("RISEDEV_ENABLE_HEAP_PROFILE") { // See https://linux.die.net/man/3/jemalloc for the descriptions of profiling options - cmd.env( - "MALLOC_CONF", - "prof:true,lg_prof_interval:34,lg_prof_sample:19,prof_prefix:compactor", - ); + let conf = "prof:true,lg_prof_interval:34,lg_prof_sample:19,prof_prefix:compactor"; + cmd.env("_RJEM_MALLOC_CONF", conf); // prefixed for macos + cmd.env("MALLOC_CONF", conf); // unprefixed for linux } cmd.arg("--config-path") diff --git a/src/risedevtool/src/task/compute_node_service.rs 
b/src/risedevtool/src/task/compute_node_service.rs index 6c705154e0578..ced6bec115f6a 100644 --- a/src/risedevtool/src/task/compute_node_service.rs +++ b/src/risedevtool/src/task/compute_node_service.rs @@ -56,12 +56,8 @@ impl ComputeNodeService { )) .arg("--advertise-addr") .arg(format!("{}:{}", config.address, config.port)) - .arg("--metrics-level") - .arg("info") .arg("--async-stack-trace") .arg(&config.async_stack_trace) - .arg("--connector-rpc-endpoint") - .arg(&config.connector_rpc_endpoint) .arg("--parallelism") .arg(&config.parallelism.to_string()) .arg("--total-memory-bytes") @@ -92,8 +88,6 @@ impl Task for ComputeNodeService { "TOKIO_CONSOLE_BIND", format!("127.0.0.1:{}", self.config.port + 1000), ); - // FIXME: Otherwise, CI will throw log size too large error - // cmd.env("RW_QUERY_LOG_PATH", DEFAULT_QUERY_LOG_PATH); if crate::util::is_env_set("RISEDEV_ENABLE_PROFILE") { cmd.env( "RW_PROFILE_PATH", @@ -103,9 +97,16 @@ impl Task for ComputeNodeService { if crate::util::is_env_set("RISEDEV_ENABLE_HEAP_PROFILE") { // See https://linux.die.net/man/3/jemalloc for the descriptions of profiling options + let conf = "prof:true,lg_prof_interval:34,lg_prof_sample:19,prof_prefix:compute-node"; + cmd.env("_RJEM_MALLOC_CONF", conf); // prefixed for macos + cmd.env("MALLOC_CONF", conf); // unprefixed for linux + } + + if crate::util::is_env_set("ENABLE_BUILD_RW_CONNECTOR") { + let prefix_bin = env::var("PREFIX_BIN")?; cmd.env( - "MALLOC_CONF", - "prof:true,lg_prof_interval:34,lg_prof_sample:19,prof_prefix:compute-node", + "CONNECTOR_LIBS_PATH", + Path::new(&prefix_bin).join("connector-node/libs/"), ); } diff --git a/src/risedevtool/src/task/connector_service.rs b/src/risedevtool/src/task/connector_service.rs deleted file mode 100644 index 05268db6a43ea..0000000000000 --- a/src/risedevtool/src/task/connector_service.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may 
not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::env; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::process::Command; - -use anyhow::{anyhow, Result}; - -use crate::{ConnectorNodeConfig, ExecuteContext, Task}; - -pub struct ConnectorNodeService { - pub config: ConnectorNodeConfig, -} - -impl ConnectorNodeService { - pub fn new(config: ConnectorNodeConfig) -> Result { - Ok(Self { config }) - } - - fn connector_path(&self) -> Result { - let prefix_bin = env::var("PREFIX_BIN")?; - Ok(Path::new(&prefix_bin) - .join("connector-node") - .join("start-service.sh")) - } -} - -impl Task for ConnectorNodeService { - fn execute(&mut self, ctx: &mut ExecuteContext) -> Result<()> { - ctx.service(self); - ctx.pb.set_message("starting"); - let path = self.connector_path()?; - if !path.exists() { - return Err(anyhow!("RisingWave connector binary not found in {:?}\nPlease enable building RisingWave connector in `./risedev configure`?", path)); - } - let mut cmd = Command::new(path); - cmd.arg("-p").arg(self.config.port.to_string()); - ctx.run_command(ctx.tmux_run(cmd)?)?; - ctx.pb.set_message("started"); - - Ok(()) - } - - fn id(&self) -> String { - self.config.id.clone() - } -} diff --git a/src/risedevtool/src/task/frontend_service.rs b/src/risedevtool/src/task/frontend_service.rs index dd0015ac188bd..cf0213028e465 100644 --- a/src/risedevtool/src/task/frontend_service.rs +++ b/src/risedevtool/src/task/frontend_service.rs @@ -61,9 +61,7 @@ impl FrontendService { .arg(format!( "{}:{}", config.listen_address, 
config.health_check_port - )) - .arg("--metrics-level") - .arg("info"); + )); let provide_meta_node = config.provide_meta_node.as_ref().unwrap(); if provide_meta_node.is_empty() { @@ -94,8 +92,6 @@ impl Task for FrontendService { let mut cmd = self.frontend()?; cmd.env("RUST_BACKTRACE", "1"); - // FIXME: Otherwise, CI will throw log size too large error - // cmd.env("RW_QUERY_LOG_PATH", DEFAULT_QUERY_LOG_PATH); let prefix_config = env::var("PREFIX_CONFIG")?; cmd.arg("--config-path") diff --git a/src/risedevtool/src/task/meta_node_service.rs b/src/risedevtool/src/task/meta_node_service.rs index 64aca22962f9c..2494a9eceaf16 100644 --- a/src/risedevtool/src/task/meta_node_service.rs +++ b/src/risedevtool/src/task/meta_node_service.rs @@ -60,13 +60,10 @@ impl MetaNodeService { config.listen_address, config.dashboard_port )); - cmd.arg("--prometheus-host") - .arg(format!( - "{}:{}", - config.listen_address, config.exporter_port - )) - .arg("--connector-rpc-endpoint") - .arg(&config.connector_rpc_endpoint); + cmd.arg("--prometheus-host").arg(format!( + "{}:{}", + config.listen_address, config.exporter_port + )); match config.provide_prometheus.as_ref().unwrap().as_slice() { [] => {} @@ -174,8 +171,6 @@ impl Task for MetaNodeService { let mut cmd = self.meta_node()?; cmd.env("RUST_BACKTRACE", "1"); - // FIXME: Otherwise, CI will throw log size too large error - // cmd.env("RW_QUERY_LOG_PATH", DEFAULT_QUERY_LOG_PATH); if crate::util::is_env_set("RISEDEV_ENABLE_PROFILE") { cmd.env( @@ -186,9 +181,16 @@ impl Task for MetaNodeService { if crate::util::is_env_set("RISEDEV_ENABLE_HEAP_PROFILE") { // See https://linux.die.net/man/3/jemalloc for the descriptions of profiling options + let conf = "prof:true,lg_prof_interval:32,lg_prof_sample:19,prof_prefix:meta-node"; + cmd.env("_RJEM_MALLOC_CONF", conf); // prefixed for macos + cmd.env("MALLOC_CONF", conf); // unprefixed for linux + } + + if crate::util::is_env_set("ENABLE_BUILD_RW_CONNECTOR") { + let prefix_bin = 
env::var("PREFIX_BIN")?; cmd.env( - "MALLOC_CONF", - "prof:true,lg_prof_interval:32,lg_prof_sample:19,prof_prefix:meta-node", + "CONNECTOR_LIBS_PATH", + Path::new(&prefix_bin).join("connector-node/libs/"), ); } diff --git a/src/risedevtool/src/task/utils.rs b/src/risedevtool/src/task/utils.rs index dbb52aaa5e644..cbf1bb8cdcedf 100644 --- a/src/risedevtool/src/task/utils.rs +++ b/src/risedevtool/src/task/utils.rs @@ -19,9 +19,6 @@ use itertools::Itertools; use crate::{AwsS3Config, MetaNodeConfig, MinioConfig, OpendalConfig, TempoConfig}; -#[allow(dead_code)] -pub(crate) const DEFAULT_QUERY_LOG_PATH: &str = ".risingwave/log/"; - /// Add a meta node to the parameters. pub fn add_meta_node(provide_meta_node: &[MetaNodeConfig], cmd: &mut Command) -> Result<()> { match provide_meta_node { diff --git a/src/rpc_client/Cargo.toml b/src/rpc_client/Cargo.toml index 7c3707d4fbc4c..f340837bf5d65 100644 --- a/src/rpc_client/Cargo.toml +++ b/src/rpc_client/Cargo.toml @@ -22,6 +22,7 @@ futures = { version = "0.3", default-features = false, features = ["alloc"] } hyper = "0.14" itertools = "0.11.0" lru = "0.10.1" +moka = { version = "0.12", features = ["future"] } rand = "0.8" risingwave_common = { workspace = true } risingwave_hummock_sdk = { workspace = true } @@ -44,7 +45,6 @@ tracing = "0.1" url = "2.4.1" [target.'cfg(not(madsim))'.dependencies] -moka = { version = "0.11", features = ["future"] } workspace-hack = { path = "../workspace-hack" } [lints] diff --git a/src/rpc_client/src/compactor_client.rs b/src/rpc_client/src/compactor_client.rs index cdd1b08049087..77fd3e0a44700 100644 --- a/src/rpc_client/src/compactor_client.rs +++ b/src/rpc_client/src/compactor_client.rs @@ -12,15 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::sync::Arc; use std::time::Duration; use risingwave_common::util::addr::HostAddr; +use risingwave_pb::hummock::hummock_manager_service_client::HummockManagerServiceClient; +use risingwave_pb::hummock::{ + GetNewSstIdsRequest, GetNewSstIdsResponse, ReportCompactionTaskRequest, + ReportCompactionTaskResponse, ReportFullScanTaskRequest, ReportFullScanTaskResponse, + ReportVacuumTaskRequest, ReportVacuumTaskResponse, +}; +use risingwave_pb::meta::system_params_service_client::SystemParamsServiceClient; +use risingwave_pb::meta::{GetSystemParamsRequest, GetSystemParamsResponse}; use risingwave_pb::monitor_service::monitor_service_client::MonitorServiceClient; use risingwave_pb::monitor_service::{StackTraceRequest, StackTraceResponse}; +use tokio::sync::RwLock; +use tokio_retry::strategy::{jitter, ExponentialBackoff}; use tonic::transport::{Channel, Endpoint}; use crate::error::Result; +use crate::retry_rpc; +const ENDPOINT_KEEP_ALIVE_INTERVAL_SEC: u64 = 60; +const ENDPOINT_KEEP_ALIVE_TIMEOUT_SEC: u64 = 60; +const DEFAULT_RETRY_INTERVAL: u64 = 20; +const DEFAULT_RETRY_MAX_DELAY: Duration = Duration::from_secs(5); +const DEFAULT_RETRY_MAX_ATTEMPTS: usize = 3; #[derive(Clone)] pub struct CompactorClient { pub monitor_client: MonitorServiceClient, @@ -46,3 +63,154 @@ impl CompactorClient { .into_inner()) } } + +#[derive(Debug, Clone)] +pub struct GrpcCompactorProxyClientCore { + hummock_client: HummockManagerServiceClient, + system_params_client: SystemParamsServiceClient, +} + +impl GrpcCompactorProxyClientCore { + pub(crate) fn new(channel: Channel) -> Self { + let hummock_client = + HummockManagerServiceClient::new(channel.clone()).max_decoding_message_size(usize::MAX); + let system_params_client = SystemParamsServiceClient::new(channel); + + Self { + hummock_client, + system_params_client, + } + } +} + +/// Client to proxy server. Cloning the instance is lightweight. +/// +/// Todo(wcy-fdu): add refresh client interface. 
+#[derive(Debug, Clone)] +pub struct GrpcCompactorProxyClient { + pub core: Arc>, + endpoint: String, +} + +impl GrpcCompactorProxyClient { + pub fn new(channel: Channel, endpoint: String) -> Self { + let core = Arc::new(RwLock::new(GrpcCompactorProxyClientCore::new(channel))); + Self { core, endpoint } + } + + async fn recreate_core(&self) { + tracing::info!("GrpcCompactorProxyClient rpc transfer failed, try to reconnect"); + let channel = self.connect_to_endpoint().await; + let mut core = self.core.write().await; + *core = GrpcCompactorProxyClientCore::new(channel); + } + + async fn connect_to_endpoint(&self) -> Channel { + let endpoint = + Endpoint::from_shared(self.endpoint.clone()).expect("Fail to construct tonic Endpoint"); + endpoint + .http2_keep_alive_interval(Duration::from_secs(ENDPOINT_KEEP_ALIVE_INTERVAL_SEC)) + .keep_alive_timeout(Duration::from_secs(ENDPOINT_KEEP_ALIVE_TIMEOUT_SEC)) + .connect_timeout(Duration::from_secs(5)) + .connect() + .await + .expect("Failed to create channel via proxy rpc endpoint.") + } + + pub async fn get_new_sst_ids( + &self, + request: GetNewSstIdsRequest, + ) -> std::result::Result, tonic::Status> { + retry_rpc!(self, get_new_sst_ids, request, GetNewSstIdsResponse) + } + + pub async fn report_compaction_task( + &self, + request: ReportCompactionTaskRequest, + ) -> std::result::Result, tonic::Status> { + retry_rpc!( + self, + report_compaction_task, + request, + ReportCompactionTaskResponse + ) + } + + pub async fn report_full_scan_task( + &self, + request: ReportFullScanTaskRequest, + ) -> std::result::Result, tonic::Status> { + retry_rpc!( + self, + report_full_scan_task, + request, + ReportFullScanTaskResponse + ) + } + + pub async fn report_vacuum_task( + &self, + request: ReportVacuumTaskRequest, + ) -> std::result::Result, tonic::Status> { + retry_rpc!(self, report_vacuum_task, request, ReportVacuumTaskResponse) + } + + pub async fn get_system_params( + &self, + ) -> std::result::Result, tonic::Status> { + 
tokio_retry::RetryIf::spawn( + Self::get_retry_strategy(), + || async { + let mut system_params_client = self.core.read().await.system_params_client.clone(); + let rpc_res = system_params_client + .get_system_params(GetSystemParamsRequest {}) + .await; + if rpc_res.is_err() { + self.recreate_core().await; + } + rpc_res + }, + Self::should_retry, + ) + .await + } + + #[inline(always)] + fn get_retry_strategy() -> impl Iterator { + ExponentialBackoff::from_millis(DEFAULT_RETRY_INTERVAL) + .max_delay(DEFAULT_RETRY_MAX_DELAY) + .take(DEFAULT_RETRY_MAX_ATTEMPTS) + .map(jitter) + } + + #[inline(always)] + fn should_retry(status: &tonic::Status) -> bool { + if status.code() == tonic::Code::Unavailable + || status.code() == tonic::Code::Unknown + || (status.code() == tonic::Code::Unauthenticated + && status.message().contains("invalid auth token")) + { + return true; + } + false + } +} + +#[macro_export] +macro_rules! retry_rpc { + ($self:expr, $rpc_call:ident, $request:expr, $response:ty) => { + tokio_retry::RetryIf::spawn( + Self::get_retry_strategy(), + || async { + let mut hummock_client = $self.core.read().await.hummock_client.clone(); + let rpc_res = hummock_client.$rpc_call($request.clone()).await; + if rpc_res.is_err() { + $self.recreate_core().await; + } + rpc_res + }, + Self::should_retry, + ) + .await + }; +} diff --git a/src/rpc_client/src/compute_client.rs b/src/rpc_client/src/compute_client.rs index aac767570052e..15516380bd418 100644 --- a/src/rpc_client/src/compute_client.rs +++ b/src/rpc_client/src/compute_client.rs @@ -27,7 +27,8 @@ use risingwave_pb::compute::config_service_client::ConfigServiceClient; use risingwave_pb::compute::{ShowConfigRequest, ShowConfigResponse}; use risingwave_pb::monitor_service::monitor_service_client::MonitorServiceClient; use risingwave_pb::monitor_service::{ - HeapProfilingRequest, HeapProfilingResponse, ProfilingRequest, ProfilingResponse, + AnalyzeHeapRequest, AnalyzeHeapResponse, HeapProfilingRequest, 
HeapProfilingResponse, + ListHeapProfilingRequest, ListHeapProfilingResponse, ProfilingRequest, ProfilingResponse, StackTraceRequest, StackTraceResponse, }; use risingwave_pb::task_service::exchange_service_client::ExchangeServiceClient; @@ -211,6 +212,24 @@ impl ComputeClient { .into_inner()) } + pub async fn list_heap_profile(&self) -> Result { + Ok(self + .monitor_client + .to_owned() + .list_heap_profiling(ListHeapProfilingRequest {}) + .await? + .into_inner()) + } + + pub async fn analyze_heap(&self, path: String) -> Result { + Ok(self + .monitor_client + .to_owned() + .analyze_heap(AnalyzeHeapRequest { path }) + .await? + .into_inner()) + } + pub async fn show_config(&self) -> Result { Ok(self .config_client diff --git a/src/rpc_client/src/lib.rs b/src/rpc_client/src/lib.rs index aabb8e7378b65..6afa67ef88efe 100644 --- a/src/rpc_client/src/lib.rs +++ b/src/rpc_client/src/lib.rs @@ -16,11 +16,10 @@ //! response gRPC message structs. #![feature(trait_alias)] -#![feature(binary_heap_drain_sorted)] #![feature(result_option_inspect)] #![feature(type_alias_impl_trait)] #![feature(associated_type_defaults)] -#![feature(generators)] +#![feature(coroutines)] #![feature(iterator_try_collect)] #![feature(hash_extract_if)] #![feature(try_blocks)] @@ -28,8 +27,6 @@ #![feature(impl_trait_in_assoc_type)] use std::any::type_name; -#[cfg(madsim)] -use std::collections::HashMap; use std::fmt::{Debug, Formatter}; use std::future::Future; use std::iter::repeat; @@ -38,17 +35,14 @@ use std::sync::Arc; use anyhow::anyhow; use async_trait::async_trait; use futures::future::try_join_all; -use futures::stream::BoxStream; +use futures::stream::{BoxStream, Peekable}; use futures::{Stream, StreamExt}; -#[cfg(not(madsim))] use moka::future::Cache; use rand::prelude::SliceRandom; use risingwave_common::util::addr::HostAddr; use risingwave_pb::common::WorkerNode; use risingwave_pb::meta::heartbeat_request::extra_info; use tokio::sync::mpsc::{channel, Sender}; -#[cfg(madsim)] -use 
tokio::sync::Mutex; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status}; @@ -63,11 +57,12 @@ mod sink_coordinate_client; mod stream_client; mod tracing; -pub use compactor_client::CompactorClient; +pub use compactor_client::{CompactorClient, GrpcCompactorProxyClient}; pub use compute_client::{ComputeClient, ComputeClientPool, ComputeClientPoolRef}; pub use connector_client::{ConnectorClient, SinkCoordinatorStreamHandle, SinkWriterStreamHandle}; pub use hummock_meta_client::{CompactionEventItem, HummockMetaClient}; pub use meta_client::{MetaClient, SinkCoordinationRpcClient}; +use risingwave_common::util::await_future_with_monitor_error_stream; pub use sink_coordinate_client::CoordinatorStreamHandle; pub use stream_client::{StreamClient, StreamClientPool, StreamClientPoolRef}; @@ -84,12 +79,7 @@ pub trait RpcClient: Send + Sync + 'static + Clone { pub struct RpcClientPool { connection_pool_size: u16, - #[cfg(not(madsim))] clients: Cache>, - - // moka::Cache internally uses system thread, so we can't use it in simulation - #[cfg(madsim)] - clients: Arc>>, } impl Default for RpcClientPool @@ -108,10 +98,7 @@ where pub fn new(connection_pool_size: u16) -> Self { Self { connection_pool_size, - #[cfg(not(madsim))] clients: Cache::new(u64::MAX), - #[cfg(madsim)] - clients: Arc::new(Mutex::new(HashMap::new())), } } @@ -124,7 +111,6 @@ where /// Gets the RPC client for the given addr. If the connection is not established, a /// new client will be created and returned. 
- #[cfg(not(madsim))] pub async fn get_by_addr(&self, addr: HostAddr) -> Result { Ok(self .clients @@ -140,17 +126,6 @@ where .unwrap() .clone()) } - - #[cfg(madsim)] - pub async fn get_by_addr(&self, addr: HostAddr) -> Result { - let mut clients = self.clients.lock().await; - if let Some(client) = clients.get(&addr) { - return Ok(client.clone()); - } - let client = S::new_client(addr.clone()).await?; - clients.insert(addr, client.clone()); - Ok(client) - } } /// `ExtraInfoSource` is used by heartbeat worker to pull extra info that needs to be piggybacked. @@ -198,7 +173,7 @@ macro_rules! meta_rpc_client_method_impl { pub struct BidiStreamHandle { request_sender: Sender, - response_stream: BoxStream<'static, std::result::Result>, + response_stream: Peekable>>, } impl Debug for BidiStreamHandle { @@ -214,7 +189,7 @@ impl BidiStreamHandle { ) -> Self { Self { request_sender, - response_stream, + response_stream: response_stream.peekable(), } } @@ -248,7 +223,7 @@ impl BidiStreamHandle { Ok(( Self { request_sender, - response_stream: response_stream.boxed(), + response_stream: response_stream.boxed().peekable(), }, first_response, )) @@ -263,10 +238,16 @@ impl BidiStreamHandle { } pub async fn send_request(&mut self, request: REQ) -> Result<()> { - Ok(self - .request_sender - .send(request) - .await - .map_err(|_| anyhow!("unable to send request {}", type_name::()))?) 
+ match await_future_with_monitor_error_stream( + &mut self.response_stream, + self.request_sender.send(request), + ) + .await + { + Ok(send_result) => send_result + .map_err(|_| anyhow!("unable to send request {}", type_name::()).into()), + Err(None) => Err(anyhow!("end of response stream").into()), + Err(Some(e)) => Err(e.into()), + } } } diff --git a/src/rpc_client/src/meta_client.rs b/src/rpc_client/src/meta_client.rs index 2b87ae995a564..95b746ea33e6c 100644 --- a/src/rpc_client/src/meta_client.rs +++ b/src/rpc_client/src/meta_client.rs @@ -50,9 +50,11 @@ use risingwave_pb::ddl_service::alter_relation_name_request::Relation; use risingwave_pb::ddl_service::ddl_service_client::DdlServiceClient; use risingwave_pb::ddl_service::drop_table_request::SourceId; use risingwave_pb::ddl_service::*; +use risingwave_pb::hummock::get_compaction_score_response::PickerInfo; use risingwave_pb::hummock::hummock_manager_service_client::HummockManagerServiceClient; use risingwave_pb::hummock::rise_ctl_update_compaction_config_request::mutable_config::MutableConfig; use risingwave_pb::hummock::subscribe_compaction_event_request::Register; +use risingwave_pb::hummock::write_limits::WriteLimit; use risingwave_pb::hummock::*; use risingwave_pb::meta::add_worker_node_request::Property; use risingwave_pb::meta::cancel_creating_jobs_request::PbJobs; @@ -429,11 +431,13 @@ impl MetaClient { pub async fn replace_table( &self, + source: Option, table: PbTable, graph: StreamFragmentGraph, table_col_index_mapping: ColIndexMapping, ) -> Result { let request = ReplaceTablePlanRequest { + source, table: Some(table), fragment_graph: Some(graph), table_col_index_mapping: Some(table_col_index_mapping.to_protobuf()), @@ -661,7 +665,7 @@ impl MetaClient { extra_info.push(info); } } - tracing::trace!(target: "events::meta::client_heartbeat", "heartbeat"); + tracing::debug!(target: "events::meta::client_heartbeat", "heartbeat"); match tokio::time::timeout( // TODO: decide better min_interval for 
timeout min_interval * 3, @@ -694,6 +698,12 @@ impl MetaClient { Ok(resp.snapshot.unwrap()) } + pub async fn wait(&self) -> Result<()> { + let request = WaitRequest {}; + self.inner.wait(request).await?; + Ok(()) + } + pub async fn cancel_creating_jobs(&self, jobs: PbJobs) -> Result> { let request = CancelCreatingJobsRequest { jobs: Some(jobs) }; let resp = self.inner.cancel_creating_jobs(request).await?; @@ -933,10 +943,10 @@ impl MetaClient { Ok(resp.job_id) } - pub async fn get_backup_job_status(&self, job_id: u64) -> Result { + pub async fn get_backup_job_status(&self, job_id: u64) -> Result<(BackupJobStatus, String)> { let req = GetBackupJobStatusRequest { job_id }; let resp = self.inner.get_backup_job_status(req).await?; - Ok(resp.job_status()) + Ok((resp.job_status(), resp.message)) } pub async fn delete_meta_snapshot(&self, snapshot_ids: &[u64]) -> Result<()> { @@ -1042,6 +1052,41 @@ impl MetaClient { )) } + pub async fn get_compaction_score( + &self, + compaction_group_id: CompactionGroupId, + ) -> Result> { + let req = GetCompactionScoreRequest { + compaction_group_id, + }; + let resp = self.inner.get_compaction_score(req).await?; + Ok(resp.scores) + } + + pub async fn risectl_rebuild_table_stats(&self) -> Result<()> { + let req = RiseCtlRebuildTableStatsRequest {}; + let _resp = self.inner.rise_ctl_rebuild_table_stats(req).await?; + Ok(()) + } + + pub async fn list_branched_object(&self) -> Result> { + let req = ListBranchedObjectRequest {}; + let resp = self.inner.list_branched_object(req).await?; + Ok(resp.branched_objects) + } + + pub async fn list_active_write_limit(&self) -> Result> { + let req = ListActiveWriteLimitRequest {}; + let resp = self.inner.list_active_write_limit(req).await?; + Ok(resp.write_limits) + } + + pub async fn list_hummock_meta_config(&self) -> Result> { + let req = ListHummockMetaConfigRequest {}; + let resp = self.inner.list_hummock_meta_config(req).await?; + Ok(resp.configs) + } + pub async fn delete_worker_node(&self, 
worker: HostAddress) -> Result<()> { let _resp = self .inner @@ -1267,7 +1312,8 @@ impl GrpcMetaClientCore { let cluster_client = ClusterServiceClient::new(channel.clone()); let meta_member_client = MetaMemberClient::new(channel.clone()); let heartbeat_client = HeartbeatServiceClient::new(channel.clone()); - let ddl_client = DdlServiceClient::new(channel.clone()); + let ddl_client = + DdlServiceClient::new(channel.clone()).max_decoding_message_size(usize::MAX); let hummock_client = HummockManagerServiceClient::new(channel.clone()).max_decoding_message_size(usize::MAX); let notification_client = @@ -1465,7 +1511,7 @@ impl GrpcMetaClient { force_refresh_receiver: Receiver>>, meta_config: MetaConfig, ) -> Result<()> { - let core_ref = self.core.clone(); + let core_ref: Arc> = self.core.clone(); let current_leader = init_leader_addr; let enable_period_tick = matches!(members, Either::Right(_)); @@ -1679,6 +1725,7 @@ macro_rules! for_all_meta_rpc { ,{ ddl_client, list_connections, ListConnectionsRequest, ListConnectionsResponse } ,{ ddl_client, drop_connection, DropConnectionRequest, DropConnectionResponse } ,{ ddl_client, get_tables, GetTablesRequest, GetTablesResponse } + ,{ ddl_client, wait, WaitRequest, WaitResponse } ,{ hummock_client, unpin_version_before, UnpinVersionBeforeRequest, UnpinVersionBeforeResponse } ,{ hummock_client, get_current_version, GetCurrentVersionRequest, GetCurrentVersionResponse } ,{ hummock_client, replay_version_delta, ReplayVersionDeltaRequest, ReplayVersionDeltaResponse } @@ -1706,7 +1753,12 @@ macro_rules! 
for_all_meta_rpc { ,{ hummock_client, init_metadata_for_replay, InitMetadataForReplayRequest, InitMetadataForReplayResponse } ,{ hummock_client, split_compaction_group, SplitCompactionGroupRequest, SplitCompactionGroupResponse } ,{ hummock_client, rise_ctl_list_compaction_status, RiseCtlListCompactionStatusRequest, RiseCtlListCompactionStatusResponse } + ,{ hummock_client, get_compaction_score, GetCompactionScoreRequest, GetCompactionScoreResponse } + ,{ hummock_client, rise_ctl_rebuild_table_stats, RiseCtlRebuildTableStatsRequest, RiseCtlRebuildTableStatsResponse } ,{ hummock_client, subscribe_compaction_event, impl tonic::IntoStreamingRequest, Streaming } + ,{ hummock_client, list_branched_object, ListBranchedObjectRequest, ListBranchedObjectResponse } + ,{ hummock_client, list_active_write_limit, ListActiveWriteLimitRequest, ListActiveWriteLimitResponse } + ,{ hummock_client, list_hummock_meta_config, ListHummockMetaConfigRequest, ListHummockMetaConfigResponse } ,{ user_client, create_user, CreateUserRequest, CreateUserResponse } ,{ user_client, update_user, UpdateUserRequest, UpdateUserResponse } ,{ user_client, drop_user, DropUserRequest, DropUserResponse } diff --git a/src/source/Cargo.toml b/src/source/Cargo.toml index bf60bc45f7395..aedb0b9158908 100644 --- a/src/source/Cargo.toml +++ b/src/source/Cargo.toml @@ -15,7 +15,6 @@ normal = ["workspace-hack"] [dependencies] anyhow = "1" -easy-ext = "1" futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = { workspace = true } itertools = "0.11" diff --git a/src/source/benches/json_parser.rs b/src/source/benches/json_parser.rs index 70df93b902f57..e54a51befa9f1 100644 --- a/src/source/benches/json_parser.rs +++ b/src/source/benches/json_parser.rs @@ -85,11 +85,11 @@ fn generate_json_row(rng: &mut impl Rng) -> String { ) } -fn generate_json_rows() -> Vec>> { +fn generate_json_rows() -> Vec> { let mut rng = rand::thread_rng(); let mut records = 
Vec::with_capacity(NUM_RECORDS); for _ in 0..NUM_RECORDS { - records.push(Some(generate_json_row(&mut rng).into_bytes())); + records.push(generate_json_row(&mut rng).into_bytes()); } records } diff --git a/src/source/src/connector_source.rs b/src/source/src/connector_source.rs index df0dc8e147a59..733ba6a8c4a83 100644 --- a/src/source/src/connector_source.rs +++ b/src/source/src/connector_source.rs @@ -16,17 +16,24 @@ use std::collections::HashMap; use std::sync::Arc; use futures::future::try_join_all; +use futures::stream::pending; use futures::StreamExt; +use futures_async_stream::try_stream; use itertools::Itertools; use risingwave_common::catalog::ColumnId; use risingwave_common::error::ErrorCode::ConnectorError; -use risingwave_common::error::{internal_error, Result}; +use risingwave_common::error::{internal_error, Result, RwError}; use risingwave_common::util::select_all; +use risingwave_connector::dispatch_source_prop; use risingwave_connector::parser::{CommonParserConfig, ParserConfig, SpecificParserConfig}; +use risingwave_connector::source::filesystem::{FsPage, FsPageItem, S3SplitEnumerator}; use risingwave_connector::source::{ - BoxSourceWithStateStream, Column, ConnectorProperties, ConnectorState, SourceColumnDesc, - SourceContext, SplitReaderImpl, + create_split_reader, BoxSourceWithStateStream, BoxTryStream, Column, ConnectorProperties, + ConnectorState, FsFilterCtrlCtx, FsListInner, SourceColumnDesc, SourceContext, + SourceEnumeratorContext, SplitEnumerator, SplitReader, }; +use tokio::time; +use tokio::time::{Duration, MissedTickBehavior}; #[derive(Clone, Debug)] pub struct ConnectorSource { @@ -36,6 +43,15 @@ pub struct ConnectorSource { pub connector_message_buffer_size: usize, } +#[derive(Clone, Debug)] +pub struct FsListCtrlContext { + pub interval: Duration, + pub last_tick: Option, + + pub filter_ctx: FsFilterCtrlCtx, +} +pub type FsListCtrlContextRef = Arc; + impl ConnectorSource { pub fn new( properties: HashMap, @@ -72,12 +88,34 @@ impl 
ConnectorSource { .collect::>>() } + pub async fn get_source_list(&self) -> Result> { + let config = self.config.clone(); + let lister = match config { + ConnectorProperties::S3(prop) => { + S3SplitEnumerator::new(*prop, Arc::new(SourceEnumeratorContext::default())).await? + } + other => return Err(internal_error(format!("Unsupported source: {:?}", other))), + }; + + Ok(build_fs_list_stream( + FsListCtrlContext { + interval: Duration::from_secs(60), + last_tick: None, + filter_ctx: FsFilterCtrlCtx, + }, + lister, + )) + } + pub async fn stream_reader( &self, - splits: ConnectorState, + state: ConnectorState, column_ids: Vec, source_ctx: Arc, ) -> Result { + let Some(splits) = state else { + return Ok(pending().boxed()); + }; let config = self.config.clone(); let columns = self.get_target_columns(column_ids)?; @@ -99,53 +137,78 @@ impl ConnectorSource { }, }; - let readers = if config.support_multiple_splits() { - tracing::debug!( - "spawning connector split reader for multiple splits {:?}", - splits - ); - - let reader = SplitReaderImpl::create( - config, - splits, - parser_config, - source_ctx, - data_gen_columns, - ) - .await?; - - vec![reader] - } else { - let to_reader_splits = match splits { - Some(vec_split_impl) => vec_split_impl - .into_iter() - .map(|split| Some(vec![split])) - .collect::>(), - None => vec![None], + let support_multiple_splits = config.support_multiple_splits(); + + dispatch_source_prop!(config, prop, { + let readers = if support_multiple_splits { + tracing::debug!( + "spawning connector split reader for multiple splits {:?}", + splits + ); + + let reader = + create_split_reader(*prop, splits, parser_config, source_ctx, data_gen_columns) + .await?; + + vec![reader] + } else { + let to_reader_splits = splits.into_iter().map(|split| vec![split]); + + try_join_all(to_reader_splits.into_iter().map(|splits| { + tracing::debug!("spawning connector split reader for split {:?}", splits); + let props = prop.clone(); + let data_gen_columns = 
data_gen_columns.clone(); + let parser_config = parser_config.clone(); + // TODO: is this reader split across multiple threads...? Realistically, we want + // source_ctx to live in a single actor. + let source_ctx = source_ctx.clone(); + async move { + create_split_reader( + *props, + splits, + parser_config, + source_ctx, + data_gen_columns, + ) + .await + } + })) + .await? }; - try_join_all(to_reader_splits.into_iter().map(|state| { - tracing::debug!("spawning connector split reader for split {:?}", state); - let props = config.clone(); - let data_gen_columns = data_gen_columns.clone(); - let parser_config = parser_config.clone(); - // TODO: is this reader split across multiple threads...? Realistically, we want - // source_ctx to live in a single actor. - let source_ctx = source_ctx.clone(); - async move { - SplitReaderImpl::create( - props, - state, - parser_config, - source_ctx, - data_gen_columns, - ) - .await - } - })) - .await? - }; + Ok(select_all(readers.into_iter().map(|r| r.into_stream())).boxed()) + }) + } +} + +#[try_stream(boxed, ok = FsPage, error = RwError)] +async fn build_fs_list_stream( + mut ctrl_ctx: FsListCtrlContext, + mut list_op: impl FsListInner + Send + 'static, +) { + let mut interval = time::interval(ctrl_ctx.interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // controlling whether request for next page + fn page_ctrl_logic(_ctx: &FsListCtrlContext, has_finished: bool, _page_num: usize) -> bool { + !has_finished + } - Ok(select_all(readers.into_iter().map(|r| r.into_stream())).boxed()) + loop { + let mut page_num = 0; + ctrl_ctx.last_tick = Some(time::Instant::now()); + 'inner: loop { + let (fs_page, has_finished) = list_op.get_next_page::().await?; + let matched_items = fs_page + .into_iter() + .filter(|item| list_op.filter_policy(&ctrl_ctx.filter_ctx, page_num, item)) + .collect_vec(); + yield matched_items; + page_num += 1; + if !page_ctrl_logic(&ctrl_ctx, has_finished, page_num) { + break 'inner; + } + } 
+ interval.tick().await; } } diff --git a/src/source/src/fs_connector_source.rs b/src/source/src/fs_connector_source.rs index daee19569db0f..671f5b99c5bae 100644 --- a/src/source/src/fs_connector_source.rs +++ b/src/source/src/fs_connector_source.rs @@ -12,15 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. +// *** NOTICE: TO BE DEPRECATED *** // + use std::collections::HashMap; use std::sync::Arc; +use futures::stream::pending; +use futures::StreamExt; use risingwave_common::catalog::ColumnId; use risingwave_common::error::ErrorCode::ConnectorError; -use risingwave_common::error::{internal_error, Result, RwError}; +use risingwave_common::error::{internal_error, Result}; +use risingwave_connector::dispatch_source_prop; use risingwave_connector::parser::{CommonParserConfig, ParserConfig, SpecificParserConfig}; use risingwave_connector::source::{ - ConnectorProperties, ConnectorState, SourceColumnDesc, SourceContext, SplitReaderImpl, + create_split_reader, BoxSourceWithStateStream, ConnectorProperties, ConnectorState, + SourceColumnDesc, SourceContext, SplitReader, }; #[derive(Clone, Debug)] @@ -77,7 +83,7 @@ impl FsConnectorSource { state: ConnectorState, column_ids: Vec, source_ctx: Arc, - ) -> Result { + ) -> Result { let config = self.config.clone(); let columns = self.get_target_columns(column_ids)?; @@ -87,8 +93,16 @@ impl FsConnectorSource { rw_columns: columns, }, }; - SplitReaderImpl::create(config, state, parser_config, source_ctx, None) - .await - .map_err(RwError::from) + let stream = match state { + None => pending().boxed(), + Some(splits) => { + dispatch_source_prop!(config, prop, { + create_split_reader(*prop, splits, parser_config, source_ctx, None) + .await? 
+ .into_stream() + }) + } + }; + Ok(stream) } } diff --git a/src/source/src/lib.rs b/src/source/src/lib.rs index 30c7d90cfe771..aaa045c607c95 100644 --- a/src/source/src/lib.rs +++ b/src/source/src/lib.rs @@ -14,13 +14,13 @@ #![allow(clippy::derive_partial_eq_without_eq)] #![feature(trait_alias)] -#![feature(binary_heap_drain_sorted)] #![feature(lint_reasons)] #![feature(result_option_inspect)] -#![feature(generators)] +#![feature(coroutines)] #![feature(hash_extract_if)] #![feature(type_alias_impl_trait)] #![feature(box_patterns)] +#![feature(stmt_expr_attributes)] pub use table::*; diff --git a/src/source/src/source_desc.rs b/src/source/src/source_desc.rs index 4d4b9f9cb5b80..161bbc41ceb63 100644 --- a/src/source/src/source_desc.rs +++ b/src/source/src/source_desc.rs @@ -18,14 +18,12 @@ use std::sync::Arc; use risingwave_common::catalog::ColumnDesc; use risingwave_common::error::ErrorCode::ProtocolError; use risingwave_common::error::{Result, RwError}; -use risingwave_connector::parser::SpecificParserConfig; +use risingwave_connector::parser::{EncodingProperties, ProtocolProperties, SpecificParserConfig}; use risingwave_connector::source::monitor::SourceMetrics; -use risingwave_connector::source::{ - SourceColumnDesc, SourceColumnType, SourceEncode, SourceFormat, SourceStruct, -}; +use risingwave_connector::source::{ConnectorProperties, SourceColumnDesc, SourceColumnType}; use risingwave_connector::ConnectorParams; use risingwave_pb::catalog::PbStreamSourceInfo; -use risingwave_pb::plan_common::{PbColumnCatalog, PbEncodeType, PbFormatType, RowFormatType}; +use risingwave_pb::plan_common::PbColumnCatalog; use crate::connector_source::ConnectorSource; use crate::fs_connector_source::FsConnectorSource; @@ -33,19 +31,19 @@ use crate::fs_connector_source::FsConnectorSource; pub const DEFAULT_CONNECTOR_MESSAGE_BUFFER_SIZE: usize = 16; /// `SourceDesc` describes a stream source. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct SourceDesc { pub source: ConnectorSource, - pub source_struct: SourceStruct, pub columns: Vec, pub metrics: Arc, + + pub is_new_fs_source: bool, } /// `FsSourceDesc` describes a stream source. #[derive(Debug)] pub struct FsSourceDesc { pub source: FsConnectorSource, - pub source_struct: SourceStruct, pub columns: Vec, pub metrics: Arc, } @@ -101,12 +99,16 @@ impl SourceDescBuilder { columns } - pub fn build(self) -> Result { + pub fn build(mut self) -> Result { let columns = self.column_catalogs_to_source_column_descs(); - let source_struct = extract_source_struct(&self.source_info)?; - let psrser_config = - SpecificParserConfig::new(source_struct, &self.source_info, &self.properties)?; + let psrser_config = SpecificParserConfig::new(&self.source_info, &self.properties)?; + + let is_new_fs_source = ConnectorProperties::is_new_fs_connector_hash_map(&self.properties); + if is_new_fs_source { + // new fs source requires `connector='s3_v2' but we simply reuse S3 connector` + ConnectorProperties::rewrite_upstream_source_key_hash_map(&mut self.properties); + } let source = ConnectorSource::new( self.properties, @@ -117,9 +119,9 @@ impl SourceDescBuilder { Ok(SourceDesc { source, - source_struct, columns, metrics: self.metrics, + is_new_fs_source, }) } @@ -128,9 +130,16 @@ impl SourceDescBuilder { } pub fn build_fs_source_desc(&self) -> Result { - let source_struct = extract_source_struct(&self.source_info)?; - match (source_struct.format, source_struct.encode) { - (SourceFormat::Plain, SourceEncode::Csv | SourceEncode::Json) => {} + let parser_config = SpecificParserConfig::new(&self.source_info, &self.properties)?; + + match ( + &parser_config.protocol_config, + &parser_config.encoding_config, + ) { + ( + ProtocolProperties::Plain, + EncodingProperties::Csv(_) | EncodingProperties::Json(_), + ) => {} (format, encode) => { return Err(RwError::from(ProtocolError(format!( "Unsupported combination of format {:?} and 
encode {:?}", @@ -141,9 +150,6 @@ impl SourceDescBuilder { let columns = self.column_catalogs_to_source_column_descs(); - let parser_config = - SpecificParserConfig::new(source_struct, &self.source_info, &self.properties)?; - let source = FsConnectorSource::new( self.properties.clone(), columns.clone(), @@ -156,71 +162,12 @@ impl SourceDescBuilder { Ok(FsSourceDesc { source, - source_struct, columns, metrics: self.metrics.clone(), }) } } -// Only return valid (format, encode) -pub fn extract_source_struct(info: &PbStreamSourceInfo) -> Result { - // old version meta. - if let Ok(format) = info.get_row_format() { - let (format, encode) = match format { - RowFormatType::Json => (SourceFormat::Plain, SourceEncode::Json), - RowFormatType::Protobuf => (SourceFormat::Plain, SourceEncode::Protobuf), - RowFormatType::DebeziumJson => (SourceFormat::Debezium, SourceEncode::Json), - RowFormatType::Avro => (SourceFormat::Plain, SourceEncode::Avro), - RowFormatType::Maxwell => (SourceFormat::Maxwell, SourceEncode::Json), - RowFormatType::CanalJson => (SourceFormat::Canal, SourceEncode::Json), - RowFormatType::Csv => (SourceFormat::Plain, SourceEncode::Csv), - RowFormatType::Native => (SourceFormat::Native, SourceEncode::Native), - RowFormatType::DebeziumAvro => (SourceFormat::Debezium, SourceEncode::Avro), - RowFormatType::UpsertJson => (SourceFormat::Upsert, SourceEncode::Json), - RowFormatType::UpsertAvro => (SourceFormat::Upsert, SourceEncode::Avro), - RowFormatType::DebeziumMongoJson => (SourceFormat::DebeziumMongo, SourceEncode::Json), - RowFormatType::Bytes => (SourceFormat::Plain, SourceEncode::Bytes), - RowFormatType::RowUnspecified => unreachable!(), - }; - return Ok(SourceStruct::new(format, encode)); - } - let source_format = info.get_format()?; - let source_encode = info.get_row_encode()?; - let (format, encode) = match (source_format, source_encode) { - (PbFormatType::Plain, PbEncodeType::Json) => (SourceFormat::Plain, SourceEncode::Json), - (PbFormatType::Plain, 
PbEncodeType::Protobuf) => { - (SourceFormat::Plain, SourceEncode::Protobuf) - } - (PbFormatType::Debezium, PbEncodeType::Json) => { - (SourceFormat::Debezium, SourceEncode::Json) - } - (PbFormatType::Plain, PbEncodeType::Avro) => (SourceFormat::Plain, SourceEncode::Avro), - (PbFormatType::Maxwell, PbEncodeType::Json) => (SourceFormat::Maxwell, SourceEncode::Json), - (PbFormatType::Canal, PbEncodeType::Json) => (SourceFormat::Canal, SourceEncode::Json), - (PbFormatType::Plain, PbEncodeType::Csv) => (SourceFormat::Plain, SourceEncode::Csv), - (PbFormatType::Native, PbEncodeType::Native) => { - (SourceFormat::Native, SourceEncode::Native) - } - (PbFormatType::Debezium, PbEncodeType::Avro) => { - (SourceFormat::Debezium, SourceEncode::Avro) - } - (PbFormatType::Upsert, PbEncodeType::Json) => (SourceFormat::Upsert, SourceEncode::Json), - (PbFormatType::Upsert, PbEncodeType::Avro) => (SourceFormat::Upsert, SourceEncode::Avro), - (PbFormatType::DebeziumMongo, PbEncodeType::Json) => { - (SourceFormat::DebeziumMongo, SourceEncode::Json) - } - (PbFormatType::Plain, PbEncodeType::Bytes) => (SourceFormat::Plain, SourceEncode::Bytes), - (format, encode) => { - return Err(RwError::from(ProtocolError(format!( - "Unsupported combination of format {:?} and encode {:?}", - format, encode - )))); - } - }; - Ok(SourceStruct::new(format, encode)) -} - pub mod test_utils { use std::collections::HashMap; diff --git a/src/source/src/table.rs b/src/source/src/table.rs index 3781bee79b1b4..08aa3a38aeaaa 100644 --- a/src/source/src/table.rs +++ b/src/source/src/table.rs @@ -317,11 +317,8 @@ mod tests { macro_rules! 
write_chunk { ($i:expr) => {{ - let chunk = StreamChunk::new( - vec![Op::Insert], - vec![I64Array::from_iter([$i]).into_ref()], - None, - ); + let chunk = + StreamChunk::new(vec![Op::Insert], vec![I64Array::from_iter([$i]).into_ref()]); write_handle.write_chunk(chunk).await.unwrap(); }}; } @@ -362,11 +359,7 @@ mod tests { assert_matches!(reader.next().await.unwrap()?, TxnMsg::Begin(_)); - let chunk = StreamChunk::new( - vec![Op::Insert], - vec![I64Array::from_iter([1]).into_ref()], - None, - ); + let chunk = StreamChunk::new(vec![Op::Insert], vec![I64Array::from_iter([1]).into_ref()]); write_handle.write_chunk(chunk).await.unwrap(); assert_matches!(reader.next().await.unwrap()?, txn_msg => { diff --git a/src/sqlparser/src/ast/data_type.rs b/src/sqlparser/src/ast/data_type.rs index 13f6654903d54..e8ad404d4d7d6 100644 --- a/src/sqlparser/src/ast/data_type.rs +++ b/src/sqlparser/src/ast/data_type.rs @@ -56,6 +56,8 @@ pub enum DataType { Interval, /// Regclass used in postgresql serial Regclass, + /// Regproc used in postgresql function + Regproc, /// Text Text, /// Bytea @@ -97,6 +99,7 @@ impl fmt::Display for DataType { } DataType::Interval => write!(f, "INTERVAL"), DataType::Regclass => write!(f, "REGCLASS"), + DataType::Regproc => write!(f, "REGPROC"), DataType::Text => write!(f, "TEXT"), DataType::Bytea => write!(f, "BYTEA"), DataType::Array(ty) => write!(f, "{}[]", ty), diff --git a/src/sqlparser/src/ast/mod.rs b/src/sqlparser/src/ast/mod.rs index 83d16d96245ba..9470724f9a0c7 100644 --- a/src/sqlparser/src/ast/mod.rs +++ b/src/sqlparser/src/ast/mod.rs @@ -1294,6 +1294,9 @@ pub enum Statement { /// /// Note: RisingWave specific statement. Flush, + /// WAIT for ALL running stream jobs to finish. + /// It will block the current session the condition is met. 
+ Wait, } impl fmt::Display for Statement { @@ -1787,6 +1790,9 @@ impl fmt::Display for Statement { Statement::Flush => { write!(f, "FLUSH") } + Statement::Wait => { + write!(f, "WAIT") + } Statement::Begin { modes } => { write!(f, "BEGIN")?; if !modes.is_empty() { diff --git a/src/sqlparser/src/ast/query.rs b/src/sqlparser/src/ast/query.rs index cc703e5b81a38..f018b853f3330 100644 --- a/src/sqlparser/src/ast/query.rs +++ b/src/sqlparser/src/ast/query.rs @@ -387,11 +387,14 @@ pub enum TableFactor { subquery: Box, alias: Option, }, - /// `[ AS ]` + /// `(args)[ AS ]` + /// + /// Note that scalar functions can also be used in this way. TableFunction { name: ObjectName, alias: Option, args: Vec, + with_ordinality: bool, }, /// Represents a parenthesized table factor. The SQL spec only allows a /// join expression (`(foo bar [ baz ... ])`) to be nested, @@ -433,8 +436,16 @@ impl fmt::Display for TableFactor { } Ok(()) } - TableFactor::TableFunction { name, alias, args } => { + TableFactor::TableFunction { + name, + alias, + args, + with_ordinality, + } => { write!(f, "{}({})", name, display_comma_separated(args))?; + if *with_ordinality { + write!(f, " WITH ORDINALITY")?; + } if let Some(alias) = alias { write!(f, " AS {}", alias)?; } diff --git a/src/sqlparser/src/ast/statement.rs b/src/sqlparser/src/ast/statement.rs index a4d993a126f64..58fb2d50c6287 100644 --- a/src/sqlparser/src/ast/statement.rs +++ b/src/sqlparser/src/ast/statement.rs @@ -215,6 +215,7 @@ impl SourceSchema { impl fmt::Display for SourceSchema { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ROW FORMAT ")?; match self { SourceSchema::Protobuf(protobuf_schema) => write!(f, "PROTOBUF {}", protobuf_schema), SourceSchema::Json => write!(f, "JSON"), @@ -233,129 +234,6 @@ impl fmt::Display for SourceSchema { } } -/// will be deprecated and be replaced by Format and Encode -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 
-pub enum RowFormat { - Protobuf, // Keyword::PROTOBUF - Json, // Keyword::JSON - DebeziumJson, // Keyword::DEBEZIUM_JSON - DebeziumMongoJson, // Keyword::DEBEZIUM_MONGO_JSON - UpsertJson, // Keyword::UPSERT_JSON - Avro, // Keyword::AVRO - UpsertAvro, // Keyword::UpsertAVRO - Maxwell, // Keyword::MAXWELL - CanalJson, // Keyword::CANAL_JSON - Csv, // Keyword::CSV - DebeziumAvro, // Keyword::DEBEZIUM_AVRO - Bytes, // Keyword::BYTES - Native, -} - -impl RowFormat { - pub fn from_keyword(s: &str) -> Result { - Ok(match s { - "JSON" => RowFormat::Json, - "UPSERT_JSON" => RowFormat::UpsertJson, - "PROTOBUF" => RowFormat::Protobuf, - "DEBEZIUM_JSON" => RowFormat::DebeziumJson, - "DEBEZIUM_MONGO_JSON" => RowFormat::DebeziumMongoJson, - "AVRO" => RowFormat::Avro, - "UPSERT_AVRO" => RowFormat::UpsertAvro, - "MAXWELL" => RowFormat::Maxwell, - "CANAL_JSON" => RowFormat::CanalJson, - "CSV" => RowFormat::Csv, - "DEBEZIUM_AVRO" => RowFormat::DebeziumAvro, - "BYTES" => RowFormat::Bytes, - _ => return Err(ParserError::ParserError( - "expected JSON | UPSERT_JSON | PROTOBUF | DEBEZIUM_JSON | DEBEZIUM_AVRO | AVRO | UPSERT_AVRO | MAXWELL | CANAL_JSON | BYTES after ROW FORMAT".to_string(), - )) - }) - } - - /// a compatibility layer, return (format, row_encode) - pub fn to_format_v2(&self) -> (Format, Encode) { - let format = match self { - RowFormat::Protobuf => Format::Plain, - RowFormat::Json => Format::Plain, - RowFormat::DebeziumJson => Format::Debezium, - RowFormat::DebeziumMongoJson => Format::DebeziumMongo, - RowFormat::UpsertJson => Format::Upsert, - RowFormat::Avro => Format::Plain, - RowFormat::UpsertAvro => Format::Upsert, - RowFormat::Maxwell => Format::Maxwell, - RowFormat::CanalJson => Format::Canal, - RowFormat::Csv => Format::Plain, - RowFormat::DebeziumAvro => Format::Debezium, - RowFormat::Bytes => Format::Plain, - RowFormat::Native => Format::Native, - }; - - let encode = match self { - RowFormat::Protobuf => Encode::Protobuf, - RowFormat::Json => Encode::Json, - 
RowFormat::DebeziumJson => Encode::Json, - RowFormat::DebeziumMongoJson => Encode::Json, - RowFormat::UpsertJson => Encode::Json, - RowFormat::Avro => Encode::Avro, - RowFormat::UpsertAvro => Encode::Avro, - RowFormat::Maxwell => Encode::Json, - RowFormat::CanalJson => Encode::Json, - RowFormat::Csv => Encode::Csv, - RowFormat::DebeziumAvro => Encode::Avro, - RowFormat::Bytes => Encode::Bytes, - RowFormat::Native => Encode::Native, - }; - (format, encode) - } - - /// a compatibility layer - pub fn from_format_v2(format: &Format, encode: &Encode) -> Result { - Ok(match (format, encode) { - (Format::Native, Encode::Native) => RowFormat::Native, - (Format::Native, _) => unreachable!(), - (_, Encode::Native) => unreachable!(), - (Format::Debezium, Encode::Avro) => RowFormat::DebeziumAvro, - (Format::Debezium, Encode::Json) => RowFormat::DebeziumJson, - (Format::Debezium, _) => { - return Err(ParserError::ParserError( - "The DEBEZIUM format only support AVRO and JSON Encoding".to_string(), - )) - } - (Format::DebeziumMongo, Encode::Json) => RowFormat::DebeziumMongoJson, - (Format::DebeziumMongo, _) => { - return Err(ParserError::ParserError( - "The DEBEZIUM_MONGO format only support JSON Encoding".to_string(), - )) - } - (Format::Maxwell, Encode::Json) => RowFormat::Maxwell, - (Format::Maxwell, _) => { - return Err(ParserError::ParserError( - "The MAXWELL format only support JSON Encoding".to_string(), - )) - } - (Format::Canal, Encode::Json) => RowFormat::CanalJson, - (Format::Canal, _) => { - return Err(ParserError::ParserError( - "The CANAL format only support JSON Encoding".to_string(), - )) - } - (Format::Upsert, Encode::Avro) => RowFormat::UpsertAvro, - (Format::Upsert, Encode::Json) => RowFormat::UpsertJson, - (Format::Upsert, _) => { - return Err(ParserError::ParserError( - "The UPSERT format only support AVRO and JSON Encoding".to_string(), - )) - } - (Format::Plain, Encode::Avro) => RowFormat::Avro, - (Format::Plain, Encode::Csv) => RowFormat::Csv, - 
(Format::Plain, Encode::Protobuf) => RowFormat::Protobuf, - (Format::Plain, Encode::Json) => RowFormat::Json, - (Format::Plain, Encode::Bytes) => RowFormat::Bytes, - }) - } -} - #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Format { @@ -396,9 +274,10 @@ impl Format { "CANAL" => Format::Canal, "PLAIN" => Format::Plain, "UPSERT" => Format::Upsert, + "NATIVE" => Format::Native, // used internally for schema change _ => { return Err(ParserError::ParserError( - "expected CANAL | PROTOBUF | DEBEZIUM | MAXWELL | Plain after FORMAT" + "expected CANAL | PROTOBUF | DEBEZIUM | MAXWELL | PLAIN | NATIVE after FORMAT" .to_string(), )) } @@ -415,6 +294,7 @@ pub enum Encode { Json, // Keyword::JSON Bytes, // Keyword::BYTES Native, + Template, } // TODO: unify with `from_keyword` @@ -430,6 +310,7 @@ impl fmt::Display for Encode { Encode::Json => "JSON", Encode::Bytes => "BYTES", Encode::Native => "NATIVE", + Encode::Template => "TEMPLATE", } ) } @@ -443,11 +324,12 @@ impl Encode { "CSV" => Encode::Csv, "PROTOBUF" => Encode::Protobuf, "JSON" => Encode::Json, - _ => { - return Err(ParserError::ParserError( - "expected AVRO | BYTES | CSV | PROTOBUF | JSON after Encode".to_string(), - )) - } + "TEMPLATE" => Encode::Template, + "NATIVE" => Encode::Native, // used internally for schema change + _ => return Err(ParserError::ParserError( + "expected AVRO | BYTES | CSV | PROTOBUF | JSON | NATIVE | TEMPLATE after Encode" + .to_string(), + )), }) } } @@ -481,25 +363,12 @@ impl fmt::Display for CompatibleSourceSchema { } impl CompatibleSourceSchema { - pub fn into_source_schema( - self, - ) -> Result<(SourceSchema, Vec, Option), ParserError> { + pub fn into_source_schema_v2(self) -> (SourceSchemaV2, Option) { match self { - CompatibleSourceSchema::RowFormat(inner) => Ok(( - inner, - vec![], - Some("RisingWave will stop supporting the syntax \"ROW FORMAT\" in future versions, which will be changed to \"FORMAT ... 
ENCODE ...\" syntax.".to_string()), - )), - CompatibleSourceSchema::V2(inner) => { - inner.into_source_schema().map(|(s, ops)| (s, ops, None)) - } - } - } - - pub fn into_source_schema_v2(self) -> SourceSchemaV2 { - match self { - CompatibleSourceSchema::RowFormat(inner) => inner.into_source_schema_v2(), - CompatibleSourceSchema::V2(inner) => inner, + CompatibleSourceSchema::RowFormat(inner) => ( + inner.into_source_schema_v2(), + Some("RisingWave will stop supporting the syntax \"ROW FORMAT\" in future versions, which will be changed to \"FORMAT ... ENCODE ...\" syntax.".to_string())), + CompatibleSourceSchema::V2(inner) => (inner, None), } } } @@ -510,7 +379,7 @@ impl From for CompatibleSourceSchema { } } -pub fn parse_source_shcema(p: &mut Parser) -> Result { +fn parse_source_schema(p: &mut Parser) -> Result { if p.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) { p.expect_keyword(Keyword::FORMAT)?; let id = p.parse_identifier()?; @@ -557,18 +426,20 @@ pub fn parse_source_shcema(p: &mut Parser) -> Result SourceSchema::Native, // used internally by schema change "DEBEZIUM_AVRO" => { impl_parse_to!(avro_schema: DebeziumAvroSchema, p); SourceSchema::DebeziumAvro(avro_schema) } - "BYTES" => { - SourceSchema::Bytes + "BYTES" => SourceSchema::Bytes, + _ => { + return Err(ParserError::ParserError( + "expected JSON | UPSERT_JSON | PROTOBUF | DEBEZIUM_JSON | DEBEZIUM_AVRO \ + | AVRO | UPSERT_AVRO | MAXWELL | CANAL_JSON | BYTES | NATIVE after ROW FORMAT" + .to_string(), + )) } - _ => return Err(ParserError::ParserError( - "expected JSON | UPSERT_JSON | PROTOBUF | DEBEZIUM_JSON | DEBEZIUM_AVRO | AVRO | UPSERT_AVRO | MAXWELL | CANAL_JSON | BYTES after ROW FORMAT".to_string(), - )) - - } ; + }; Ok(CompatibleSourceSchema::RowFormat(schema)) } else { Err(ParserError::ParserError( @@ -577,7 +448,101 @@ pub fn parse_source_shcema(p: &mut Parser) -> Result bool { + (self.peek_nth_any_of_keywords(0, &[Keyword::ROW]) + && self.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) // ROW 
FORMAT + || self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) // FORMAT + } + + /// Parse the source schema. The behavior depends on the `connector` type. + pub fn parse_source_schema_with_connector( + &mut self, + connector: &str, + ) -> Result { + // row format for cdc source must be debezium json + // row format for nexmark source must be native + // default row format for datagen source is native + if connector.contains("-cdc") { + let expected = SourceSchemaV2::debezium_json(); + if self.peek_source_schema_format() { + let schema = parse_source_schema(self)?.into_source_schema_v2().0; + if schema != expected { + return Err(ParserError::ParserError(format!( + "Row format for CDC connectors should be \ + either omitted or set to `{expected}`", + ))); + } + } + Ok(expected.into()) + } else if connector.contains("nexmark") { + let expected = SourceSchemaV2::native(); + if self.peek_source_schema_format() { + let schema = parse_source_schema(self)?.into_source_schema_v2().0; + if schema != expected { + return Err(ParserError::ParserError(format!( + "Row format for nexmark connectors should be \ + either omitted or set to `{expected}`", + ))); + } + } + Ok(expected.into()) + } else if connector.contains("datagen") { + Ok(if self.peek_source_schema_format() { + parse_source_schema(self)? + } else { + SourceSchemaV2::native().into() + }) + } else { + Ok(parse_source_schema(self)?) + } + } + + /// Parse `FORMAT ... ENCODE ... (...)` in `CREATE SINK`. + /// + /// TODO: After [`SourceSchemaV2`] and [`SinkSchema`] merge, call this in [`parse_source_schema`]. 
+ pub fn parse_schema(&mut self) -> Result, ParserError> { + if !self.parse_keyword(Keyword::FORMAT) { + return Ok(None); + } + + let id = self.parse_identifier()?; + let s = id.value.to_ascii_uppercase(); + let format = Format::from_keyword(&s)?; + self.expect_keyword(Keyword::ENCODE)?; + let id = self.parse_identifier()?; + let s = id.value.to_ascii_uppercase(); + let row_encode = Encode::from_keyword(&s)?; + let row_options = self.parse_options()?; + + Ok(Some(SinkSchema { + format, + row_encode, + row_options, + })) + } +} + impl SourceSchemaV2 { + /// Create a new source schema with `Debezium` format and `Json` encoding. + pub const fn debezium_json() -> Self { + SourceSchemaV2 { + format: Format::Debezium, + row_encode: Encode::Json, + row_options: Vec::new(), + } + } + + /// Create a new source schema with `Native` format and encoding. + pub const fn native() -> Self { + SourceSchemaV2 { + format: Format::Native, + row_encode: Encode::Native, + row_options: Vec::new(), + } + } + pub fn gen_options(&self) -> Result, ParserError> { self.row_options .iter() @@ -594,112 +559,6 @@ impl SourceSchemaV2 { .try_collect() } - /// just a temporal compatibility layer will be removed soon(so the implementation is a little - /// dirty) - #[allow(deprecated)] - pub fn into_source_schema(self) -> Result<(SourceSchema, Vec), ParserError> { - let options: BTreeMap = self - .row_options - .iter() - .cloned() - .map(|x| match x.value { - Value::SingleQuotedString(s) => Ok((x.name.real_value(), s)), - Value::Number(n) => Ok((x.name.real_value(), n)), - Value::Boolean(b) => Ok((x.name.real_value(), b.to_string())), - _ => Err(ParserError::ParserError( - "`row format options` only support single quoted string value".to_owned(), - )), - }) - .try_collect()?; - - let try_consume_string_from_options = - |row_options: &BTreeMap, key: &str| -> Option { - row_options.get(key).cloned().map(AstString) - }; - let consume_string_from_options = - |row_options: &BTreeMap, key: &str| -> Result 
{ - try_consume_string_from_options(row_options, key).ok_or_else(|| { - ParserError::ParserError(format!("missing field {} in row format options", key)) - }) - }; - let get_schema_location = - |row_options: &BTreeMap| -> Result<(AstString, bool), ParserError> { - let schema_location = - try_consume_string_from_options(row_options, "schema.location"); - let schema_registry = - try_consume_string_from_options(row_options, "schema.registry"); - match (schema_location, schema_registry) { - (None, None) => Err(ParserError::ParserError( - "missing either a schema location or a schema registry".to_string(), - )), - (None, Some(schema_registry)) => Ok((schema_registry, true)), - (Some(schema_location), None) => Ok((schema_location, false)), - (Some(_), Some(_)) => Err(ParserError::ParserError( - "only need either the schema location or the schema registry".to_string(), - )), - } - }; - let row_format = RowFormat::from_format_v2(&self.format, &self.row_encode)?; - Ok(( - match row_format { - RowFormat::Protobuf => { - let (row_schema_location, use_schema_registry) = get_schema_location(&options)?; - SourceSchema::Protobuf(ProtobufSchema { - message_name: consume_string_from_options(&options, "message")?, - row_schema_location, - use_schema_registry, - }) - } - RowFormat::Json => SourceSchema::Json, - RowFormat::DebeziumJson => SourceSchema::DebeziumJson, - RowFormat::DebeziumMongoJson => SourceSchema::DebeziumMongoJson, - RowFormat::UpsertJson => SourceSchema::UpsertJson, - RowFormat::Avro => { - let (row_schema_location, use_schema_registry) = get_schema_location(&options)?; - SourceSchema::Avro(AvroSchema { - row_schema_location, - use_schema_registry, - }) - } - RowFormat::UpsertAvro => { - let (row_schema_location, use_schema_registry) = get_schema_location(&options)?; - SourceSchema::UpsertAvro(AvroSchema { - row_schema_location, - use_schema_registry, - }) - } - RowFormat::Maxwell => SourceSchema::Maxwell, - RowFormat::CanalJson => SourceSchema::CanalJson, - 
RowFormat::Csv => { - let chars = consume_string_from_options(&options, "delimiter")?.0; - let delimiter = get_delimiter(chars.as_str())?; - let has_header = try_consume_string_from_options(&options, "without_header") - .map(|s| s.0 == "false") - .unwrap_or(true); - SourceSchema::Csv(CsvInfo { - delimiter, - has_header, - }) - } - RowFormat::DebeziumAvro => { - let (row_schema_location, use_schema_registry) = get_schema_location(&options)?; - if !use_schema_registry { - return Err(ParserError::ParserError( - "schema location for DEBEZIUM_AVRO row format is not supported" - .to_string(), - )); - } - SourceSchema::DebeziumAvro(DebeziumAvroSchema { - row_schema_location, - }) - } - RowFormat::Bytes => SourceSchema::Bytes, - RowFormat::Native => SourceSchema::Native, - }, - self.row_options, - )) - } - pub fn row_options(&self) -> &[SqlOption] { self.row_options.as_ref() } @@ -896,49 +755,7 @@ impl ParseTo for CreateSourceStatement { // row format for cdc source must be debezium json // row format for nexmark source must be native // default row format for datagen source is native - let source_schema = if connector.contains("-cdc") { - if (p.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && p.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || p.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - return Err(ParserError::ParserError("Row format for cdc connectors should not be set here because it is limited to debezium json".to_string())); - } - SourceSchemaV2 { - format: Format::Debezium, - row_encode: Encode::Json, - row_options: Default::default(), - } - .into() - } else if connector.contains("nexmark") { - if (p.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && p.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || p.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - return Err(ParserError::ParserError("Row format for nexmark connectors should not be set here because it is limited to internal native format".to_string())); - } - SourceSchemaV2 { - format: 
Format::Native, - row_encode: Encode::Native, - row_options: Default::default(), - } - .into() - } else if connector.contains("datagen") { - if (p.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && p.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || p.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - parse_source_shcema(p)? - } else { - SourceSchemaV2 { - format: Format::Native, - row_encode: Encode::Native, - row_options: Default::default(), - } - .into() - } - } else { - parse_source_shcema(p)? - }; + let source_schema = p.parse_source_schema_with_connector(&connector)?; Ok(Self { if_not_exists, @@ -1006,6 +823,27 @@ impl fmt::Display for CreateSink { } } +/// Same as [`SourceSchemaV2`]. Will be merged in a dedicated rename PR. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SinkSchema { + pub format: Format, + pub row_encode: Encode, + pub row_options: Vec, +} + +impl fmt::Display for SinkSchema { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "FORMAT {} ENCODE {}", self.format, self.row_encode)?; + + if !self.row_options.is_empty() { + write!(f, " ({})", display_comma_separated(&self.row_options)) + } else { + Ok(()) + } + } +} + // sql_grammar!(CreateSinkStatement { // if_not_exists => [Keyword::IF, Keyword::NOT, Keyword::EXISTS], // sink_name: Ident, @@ -1022,6 +860,7 @@ pub struct CreateSinkStatement { pub sink_from: CreateSink, pub columns: Vec, pub emit_mode: Option, + pub sink_schema: Option, } impl ParseTo for CreateSinkStatement { @@ -1050,6 +889,8 @@ impl ParseTo for CreateSinkStatement { )); } + let sink_schema = p.parse_schema()?; + Ok(Self { if_not_exists, sink_name, @@ -1057,6 +898,7 @@ impl ParseTo for CreateSinkStatement { sink_from, columns, emit_mode, + sink_schema, }) } } @@ -1071,6 +913,9 @@ impl fmt::Display for CreateSinkStatement { v.push(format!("EMIT {}", emit_mode)); } impl_fmt_display!(with_properties, v, self); + if let 
Some(schema) = &self.sink_schema { + v.push(format!("{}", schema)); + } v.iter().join(" ").fmt(f) } } diff --git a/src/sqlparser/src/keywords.rs b/src/sqlparser/src/keywords.rs index 1d40afa8fb922..338fffc174bb1 100644 --- a/src/sqlparser/src/keywords.rs +++ b/src/sqlparser/src/keywords.rs @@ -351,6 +351,7 @@ define_keywords!( OPTION, OR, ORDER, + ORDINALITY, OTHERS, OUT, OUTER, @@ -396,6 +397,7 @@ define_keywords!( REFERENCING, REGCLASS, REGISTRY, + REGPROC, REGR_AVGX, REGR_AVGY, REGR_COUNT, @@ -539,6 +541,7 @@ define_keywords!( VIEWS, VIRTUAL, VOLATILE, + WAIT, WATERMARK, WHEN, WHENEVER, diff --git a/src/sqlparser/src/parser.rs b/src/sqlparser/src/parser.rs index 34ee42ef21ade..a6a08ab089ae9 100644 --- a/src/sqlparser/src/parser.rs +++ b/src/sqlparser/src/parser.rs @@ -259,6 +259,7 @@ impl Parser { Keyword::PREPARE => Ok(self.parse_prepare()?), Keyword::COMMENT => Ok(self.parse_comment()?), Keyword::FLUSH => Ok(Statement::Flush), + Keyword::WAIT => Ok(Statement::Wait), _ => self.expected( "an SQL statement", Token::Word(w).with_location(token.location), @@ -2433,62 +2434,11 @@ impl Parser { .iter() .find(|&opt| opt.name.real_value() == UPSTREAM_SOURCE_KEY); let connector = option.map(|opt| opt.value.to_string()); - // row format for cdc source must be debezium json - // row format for nexmark source must be native - // default row format for datagen source is native + let source_schema = if let Some(connector) = connector { - if connector.contains("-cdc") { - if (self.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && self.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - return Err(ParserError::ParserError("Row format for cdc connectors should not be set here because it is limited to debezium json".to_string())); - } - Some( - SourceSchemaV2 { - format: Format::Debezium, - row_encode: Encode::Json, - row_options: Default::default(), - } - .into(), - ) - } else if connector.contains("nexmark") { - if 
(self.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && self.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - return Err(ParserError::ParserError("Row format for nexmark connectors should not be set here because it is limited to internal native format".to_string())); - } - Some( - SourceSchemaV2 { - format: Format::Native, - row_encode: Encode::Native, - row_options: Default::default(), - } - .into(), - ) - } else if connector.contains("datagen") { - if (self.peek_nth_any_of_keywords(0, &[Keyword::ROW]) - && self.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) - || self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) - { - Some(parse_source_shcema(self)?) - } else { - Some( - SourceSchemaV2 { - format: Format::Native, - row_encode: Encode::Native, - row_options: Default::default(), - } - .into(), - ) - } - } else { - Some(parse_source_shcema(self)?) - } + Some(self.parse_source_schema_with_connector(&connector)?) } else { - // Table is NOT created with an external connector. - None + None // Table is NOT created with an external connector. }; // Parse optional `AS ( query )` @@ -3287,6 +3237,7 @@ impl Parser { // parse_interval_literal for a taste. 
Keyword::INTERVAL => Ok(DataType::Interval), Keyword::REGCLASS => Ok(DataType::Regclass), + Keyword::REGPROC => Ok(DataType::Regproc), Keyword::TEXT => { if self.consume_token(&Token::LBracket) { // Note: this is postgresql-specific @@ -4276,8 +4227,15 @@ impl Parser { if !order_by.is_empty() { return parser_err!("Table-valued functions do not support ORDER BY clauses"); } + let with_ordinality = self.parse_keywords(&[Keyword::WITH, Keyword::ORDINALITY]); + let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?; - Ok(TableFactor::TableFunction { name, alias, args }) + Ok(TableFactor::TableFunction { + name, + alias, + args, + with_ordinality, + }) } else { let for_system_time_as_of_proctime = self.parse_for_system_time_as_of_proctime()?; let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?; diff --git a/src/sqlparser/tests/testdata/create.yaml b/src/sqlparser/tests/testdata/create.yaml index 92bdabc83048c..5509ccad53a04 100644 --- a/src/sqlparser/tests/testdata/create.yaml +++ b/src/sqlparser/tests/testdata/create.yaml @@ -48,6 +48,24 @@ formatted_sql: CREATE SINK IF NOT EXISTS snk FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') - input: CREATE SINK IF NOT EXISTS snk AS SELECT count(*) AS cnt FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') formatted_sql: CREATE SINK IF NOT EXISTS snk AS SELECT count(*) AS cnt FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic', type = 'append-only'); + formatted_sql: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', 
topic = 'test_topic', type = 'append-only') +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') format plain encode json; + formatted_sql: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') FORMAT PLAIN ENCODE JSON +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') format upsert encode protobuf (schema.location = 'location', message = 'main_message'); + formatted_sql: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') FORMAT UPSERT ENCODE PROTOBUF (schema.location = 'location', message = 'main_message') +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') format; + error_msg: |- + sql parser error: Expected identifier, found: ; at line:1, column:123 + Near " topic = 'test_topic') format;" +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') format debezium; + error_msg: |- + sql parser error: Expected ENCODE, found: ; at line:1, column:132 + Near "topic = 'test_topic') format debezium" +- input: CREATE SINK snk FROM mv WITH (connector = 'kafka', properties.bootstrap.server = '127.0.0.1:9092', topic = 'test_topic') format debezium encode; + error_msg: |- + sql parser error: Expected identifier, found: ; at line:1, column:139 + Near " 'test_topic') format debezium encode;" - input: create user tmp createdb nocreatedb error_msg: 'sql parser error: conflicting or redundant options' - input: create user tmp createdb createdb diff --git a/src/sqlparser/tests/testdata/select.yaml b/src/sqlparser/tests/testdata/select.yaml index bbbb5a72bbdab..6aed3d2a4dc4c 100644 --- a/src/sqlparser/tests/testdata/select.yaml +++ 
b/src/sqlparser/tests/testdata/select.yaml @@ -29,10 +29,10 @@ formatted_sql: SELECT (CAST(ROW(1, 2, 3) AS foo)).v1.* - input: SELECT * FROM generate_series('2'::INT,'10'::INT,'2'::INT) formatted_sql: SELECT * FROM generate_series(CAST('2' AS INT), CAST('10' AS INT), CAST('2' AS INT)) - formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard(None)], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "generate_series", quote_style: None }]), alias: None, args: [Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("10")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int }))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard(None)], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "generate_series", quote_style: None }]), alias: None, args: [Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("10")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int }))], with_ordinality: false }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT * FROM unnest(Array[1,2,3]); formatted_sql: SELECT * FROM unnest(ARRAY[1, 2, 3]) - formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard(None)], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: None, args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), 
Value(Number("3"))], named: true })))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard(None)], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: None, args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), Value(Number("3"))], named: true })))], with_ordinality: false }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT id, fname, lname FROM customer WHERE salary <> 'Not Provided' AND salary <> '' formatted_sql: SELECT id, fname, lname FROM customer WHERE salary <> 'Not Provided' AND salary <> '' - input: SELECT id FROM customer WHERE NOT salary = '' @@ -102,7 +102,7 @@ formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Identifier(Ident { value: "id1", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "a1", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "id2", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "a2", quote_style: None }))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "stream", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "S", quote_style: None }, columns: [] }), for_system_time_as_of_proctime: false }, joins: [Join { relation: Table { name: ObjectName([Ident { value: "version", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "V", quote_style: None }, columns: [] }), for_system_time_as_of_proctime: true }, join_operator: Inner(On(BinaryOp { left: Identifier(Ident { value: "id1", quote_style: None }), op: Eq, right: Identifier(Ident { value: "id2", quote_style: None }) })) }] }], lateral_views: [], 
selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select percentile_cont(0.3) within group (order by x desc) from unnest(array[1,2,4,5,10]) as x formatted_sql: SELECT percentile_cont(0.3) FROM unnest(ARRAY[1, 2, 4, 5, 10]) AS x - formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Function(Function { name: ObjectName([Ident { value: "percentile_cont", quote_style: None }]), args: [Unnamed(Expr(Value(Number("0.3"))))], over: None, distinct: false, order_by: [], filter: None, within_group: Some(OrderByExpr { expr: Identifier(Ident { value: "x", quote_style: None }), asc: Some(false), nulls_first: None }) }))], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "x", quote_style: None }, columns: [] }), args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), Value(Number("4")), Value(Number("5")), Value(Number("10"))], named: true })))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Function(Function { name: ObjectName([Ident { value: "percentile_cont", quote_style: None }]), args: [Unnamed(Expr(Value(Number("0.3"))))], over: None, distinct: false, order_by: [], filter: None, within_group: Some(OrderByExpr { expr: Identifier(Ident { value: "x", quote_style: None }), asc: Some(false), nulls_first: None }) }))], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "x", quote_style: None }, columns: [] }), args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), Value(Number("4")), 
Value(Number("5")), Value(Number("10"))], named: true })))], with_ordinality: false }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select percentile_cont(0.3) within group (order by x, y desc) from t error_msg: 'sql parser error: only one arg in order by is expected here' - input: select 'apple' ~~ 'app%' diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml index 1cdaccf3cdba5..fc01eba294564 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -14,7 +14,6 @@ ignored = ["workspace-hack"] normal = ["workspace-hack"] [dependencies] -anyhow = "1" arc-swap = "1" async-trait = "0.1" auto_enums = { version = "0.8", features = ["futures03"] } @@ -22,11 +21,11 @@ await-tree = { workspace = true } bytes = { version = "1", features = ["serde"] } crossbeam = "0.8.2" dashmap = { version = "5", default-features = false } -dyn-clone = "1.0.13" +dyn-clone = "1.0.14" either = "1" enum-as-inner = "0.6" fail = "0.5" -foyer = { git = "https://github.com/mrcroxx/foyer", rev = "2c6f080" } +foyer = { git = "https://github.com/MrCroxx/foyer", rev = "2261151" } futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = { workspace = true } hex = "0.4" @@ -38,7 +37,7 @@ more-asserts = "0.3" num-integer = "0.1" parking_lot = "0.12" prometheus = { version = "0.13", features = ["process"] } -prost = "0.11" +prost = { workspace = true } rand = "0.8" risingwave_backup = { workspace = true } risingwave_common = { workspace = true } @@ -66,11 +65,12 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [ "signal", ] } tokio-retry = "0.3" +tonic = { workspace = true } tracing = "0.1" tracing-futures = { version = "0.2", features = ["futures-03"] } -xorf = "0.8.1" +xorf = "0.10.2" xxhash-rust = { version = "0.8.7", features = ["xxh32", "xxh64"] } -zstd = { version = "0.12", default-features = false } +zstd = { version = 
"0.13", default-features = false } [target.'cfg(target_os = "linux")'.dependencies] procfs = { version = "0.15", default-features = false } @@ -79,7 +79,7 @@ nix = { version = "0.27", features = ["fs", "mman"] } [target.'cfg(target_os = "macos")'.dependencies] darwin-libproc = { git = "https://github.com/risingwavelabs/darwin-libproc.git", rev = "a502be24bd0971463f5bcbfe035a248d8ba503b7" } -libc = "0.2.147" +libc = "0.2.148" mach2 = "0.4" [target.'cfg(not(madsim))'.dependencies] @@ -87,7 +87,7 @@ workspace-hack = { path = "../workspace-hack" } [dev-dependencies] criterion = { workspace = true, features = ["async_futures"] } -moka = { version = "0.11", features = ["future"] } +moka = { version = "0.12", features = ["future"] } risingwave_test_runner = { workspace = true } uuid = { version = "1", features = ["v4"] } diff --git a/src/storage/backup/Cargo.toml b/src/storage/backup/Cargo.toml index c36dd17f364ca..f4f66927c33d4 100644 --- a/src/storage/backup/Cargo.toml +++ b/src/storage/backup/Cargo.toml @@ -19,7 +19,7 @@ async-trait = "0.1" bytes = { version = "1", features = ["serde"] } itertools = "0.11" parking_lot = { version = "0.12", features = ["arc_lock"] } -prost = "0.11" +prost = { workspace = true } risingwave_common = { workspace = true } risingwave_hummock_sdk = { workspace = true } risingwave_object_store = { workspace = true } diff --git a/src/storage/backup/cmd/Cargo.toml b/src/storage/backup/cmd/Cargo.toml deleted file mode 100644 index e94b16685a620..0000000000000 --- a/src/storage/backup/cmd/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "risingwave_backup_cmd" -version = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -keywords = { workspace = true } -license = { workspace = true } -repository = { workspace = true } - -[package.metadata.cargo-machete] -ignored = ["workspace-hack"] - -[package.metadata.cargo-udeps.ignore] -normal = ["workspace-hack"] - -[dependencies] -clap = { version = "4", 
features = ["derive"] } -prometheus = { version = "0.13" } -risingwave_backup = { workspace = true } -risingwave_meta = { workspace = true } -risingwave_rt = { workspace = true } -tokio = { version = "0.2", package = "madsim-tokio", features = [ - "rt", - "rt-multi-thread", - "sync", - "macros", - "time", - "signal", - "fs", - "tracing", -] } - -[[bin]] -name = "backup-restore" -path = "src/bin/backup_restore.rs" - -[lints] -workspace = true diff --git a/src/storage/backup/integration_tests/Makefile.toml b/src/storage/backup/integration_tests/Makefile.toml index 3e648db66e95c..ad5b2810c9f17 100644 --- a/src/storage/backup/integration_tests/Makefile.toml +++ b/src/storage/backup/integration_tests/Makefile.toml @@ -8,7 +8,6 @@ script = """ set -e BUILD_BIN="$(pwd)/target/${RISEDEV_BUILD_TARGET_DIR}${BUILD_MODE_DIR}" test_root="src/storage/backup/integration_tests" -BACKUP_TEST_BACKUP_RESTORE="${BUILD_BIN}/backup-restore" \ BACKUP_TEST_MCLI="${PREFIX_BIN}/mcli" \ BACKUP_TEST_MCLI_CONFIG="${PREFIX_CONFIG}/mcli" \ BACKUP_TEST_RW_ALL_IN_ONE="${BUILD_BIN}/risingwave" \ diff --git a/src/storage/backup/integration_tests/common.sh b/src/storage/backup/integration_tests/common.sh index dcb240bfe604b..638c2b923776e 100644 --- a/src/storage/backup/integration_tests/common.sh +++ b/src/storage/backup/integration_tests/common.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash set -eo pipefail -[ -n "${BACKUP_TEST_BACKUP_RESTORE}" ] [ -n "${BACKUP_TEST_MCLI}" ] [ -n "${BACKUP_TEST_MCLI_CONFIG}" ] [ -n "${BACKUP_TEST_RW_ALL_IN_ONE}" ] @@ -52,7 +51,7 @@ function drop_mvs() { function backup() { local job_id - job_id=$(${BACKUP_TEST_RW_ALL_IN_ONE} risectl meta backup-meta 2>&1 | grep "backup job succeeded" | awk '{print $(NF)}') + job_id=$(${BACKUP_TEST_RW_ALL_IN_ONE} risectl meta backup-meta 2>&1 | grep "backup job succeeded" | awk -F ',' '{print $(NF-1)}'| awk '{print $(NF)}') [ -n "${job_id}" ] echo "${job_id}" } @@ -70,7 +69,10 @@ function restore() { stop_cluster clean_etcd_data 
start_etcd_minio - ${BACKUP_TEST_BACKUP_RESTORE} \ + ${BACKUP_TEST_RW_ALL_IN_ONE} \ + risectl \ + meta \ + restore-meta \ --meta-store-type etcd \ --meta-snapshot-id "${job_id}" \ --etcd-endpoints 127.0.0.1:2388 \ diff --git a/src/storage/backup/src/lib.rs b/src/storage/backup/src/lib.rs index 330dfbc4de44c..1daacbf691c0d 100644 --- a/src/storage/backup/src/lib.rs +++ b/src/storage/backup/src/lib.rs @@ -14,7 +14,6 @@ #![allow(clippy::derive_partial_eq_without_eq)] #![feature(trait_alias)] -#![feature(binary_heap_drain_sorted)] #![feature(type_alias_impl_trait)] #![feature(extract_if)] #![feature(custom_test_frameworks)] @@ -26,7 +25,7 @@ #![feature(lazy_cell)] #![feature(let_chains)] #![feature(error_generic_member_access)] -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] pub mod error; pub mod meta_snapshot; diff --git a/src/storage/backup/src/storage.rs b/src/storage/backup/src/storage.rs index 6ee80292d44a1..85583e6a9b267 100644 --- a/src/storage/backup/src/storage.rs +++ b/src/storage/backup/src/storage.rs @@ -75,7 +75,7 @@ impl ObjectStoreMetaSnapshotStorage { async fn get_manifest(&self) -> BackupResult> { let manifest_path = self.get_manifest_path(); - let bytes = match self.store.read(&manifest_path, None).await { + let bytes = match self.store.read(&manifest_path, ..).await { Ok(bytes) => bytes, Err(e) => { if e.is_object_not_found_error() { @@ -129,7 +129,7 @@ impl MetaSnapshotStorage for ObjectStoreMetaSnapshotStorage { async fn get(&self, id: MetaSnapshotId) -> BackupResult { let path = self.get_snapshot_path(id); - let data = self.store.read(&path, None).await?; + let data = self.store.read(&path, ..).await?; MetaSnapshot::decode(&data) } diff --git a/src/storage/benches/bench_compactor.rs b/src/storage/benches/bench_compactor.rs index c709c508acc3b..41a3649adc5cf 100644 --- a/src/storage/benches/bench_compactor.rs +++ b/src/storage/benches/bench_compactor.rs @@ -56,6 +56,7 @@ pub fn 
mock_sstable_store() -> SstableStoreRef { 0, FileCache::none(), FileCache::none(), + None, )) } @@ -112,11 +113,7 @@ async fn build_table( let end = start + 8; full_key.user_key.table_key[table_key_len - 8..].copy_from_slice(&i.to_be_bytes()); builder - .add_for_test( - full_key.to_ref(), - HummockValue::put(&value[start..end]), - true, - ) + .add_for_test(full_key.to_ref(), HummockValue::put(&value[start..end])) .await .unwrap(); } @@ -193,10 +190,9 @@ async fn compact>(iter: I, sstable_store watermark: 0, stats_target_table_ids: None, task_type: compact_task::TaskType::Dynamic, - is_target_l0_or_lbase: false, - split_by_table: false, split_weight_by_vnode: 0, use_block_based_filter: true, + ..Default::default() }; compact_and_build_sst( &mut builder, diff --git a/src/storage/benches/bench_merge_iter.rs b/src/storage/benches/bench_merge_iter.rs index 295ab2784a7a9..67c2642a86e64 100644 --- a/src/storage/benches/bench_merge_iter.rs +++ b/src/storage/benches/bench_merge_iter.rs @@ -17,6 +17,7 @@ use std::cell::RefCell; use bytes::Bytes; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use futures::executor::block_on; +use risingwave_hummock_sdk::key::TableKey; use risingwave_storage::hummock::iterator::{ Forward, HummockIterator, HummockIteratorUnion, OrderedMergeIteratorInner, UnorderedMergeIteratorInner, @@ -35,7 +36,9 @@ fn gen_interleave_shared_buffer_batch_iter( let mut batch_data = vec![]; for j in 0..batch_size { batch_data.push(( - Bytes::copy_from_slice(format!("test_key_{:08}", j * batch_count + i).as_bytes()), + TableKey(Bytes::copy_from_slice( + format!("test_key_{:08}", j * batch_count + i).as_bytes(), + )), HummockValue::put(Bytes::copy_from_slice("value".as_bytes())), )); } @@ -63,7 +66,9 @@ fn gen_interleave_shared_buffer_batch_enum_iter( let mut batch_data = vec![]; for j in 0..batch_size { batch_data.push(( - Bytes::copy_from_slice(format!("test_key_{:08}", j * batch_count + i).as_bytes()), + 
TableKey(Bytes::copy_from_slice( + format!("test_key_{:08}", j * batch_count + i).as_bytes(), + )), HummockValue::put(Bytes::copy_from_slice("value".as_bytes())), )); } diff --git a/src/storage/benches/bench_multi_builder.rs b/src/storage/benches/bench_multi_builder.rs index a295864060866..9bf0e0a9546ec 100644 --- a/src/storage/benches/bench_multi_builder.rs +++ b/src/storage/benches/bench_multi_builder.rs @@ -144,6 +144,7 @@ fn bench_builder( 0, FileCache::none(), FileCache::none(), + None, )); let mut group = c.benchmark_group("bench_multi_builder"); diff --git a/src/storage/clippy.toml b/src/storage/clippy.toml new file mode 100644 index 0000000000000..e87606a3672d9 --- /dev/null +++ b/src/storage/clippy.toml @@ -0,0 +1,26 @@ +disallowed-methods = [ + { path = "std::iter::Iterator::zip", reason = "Please use `zip_eq_fast` if it's available. Otherwise use `zip_eq_debug`" }, + { path = "itertools::Itertools::zip_eq", reason = "Please use `zip_eq_fast` if it's available. Otherwise use `zip_eq_debug`" }, + { path = "futures::stream::select_all", reason = "Please use `risingwave_common::util::select_all` instead." }, + { path = "risingwave_common::array::JsonbVal::from_serde", reason = "Please add dedicated methods as part of `JsonbRef`/`JsonbVal`, rather than take inner `serde_json::Value` out, process, and put back." }, + { path = "std::panic::catch_unwind", reason = "Please use `risingwave_common::util::panic::rw_catch_unwind` instead." }, + { path = "futures::FutureExt::catch_unwind", reason = "Please use `risingwave_common::util::panic::FutureCatchUnwindExt::rw_catch_unwind` instead." }, +] +disallowed-macros = [ + { path = "lazy_static::lazy_static", reason = "Please use `std::sync::LazyLock` instead." 
}, +] +doc-valid-idents = [ + "RisingWave", + "MinIO", + "ProtoBuf", + "BloomFilter", + "gRPC", + "PostgreSQL", + "MySQL", + "TopN", + "VNode" +] +avoid-breaking-exported-api = false +upper-case-acronyms-aggressive = true +too-many-arguments-threshold = 10 +ignore-interior-mutability = ["risingwave_hummock_sdk::key::TableKey"] \ No newline at end of file diff --git a/src/storage/compactor/Cargo.toml b/src/storage/compactor/Cargo.toml index f4118ff639b5d..e6e985b2ba424 100644 --- a/src/storage/compactor/Cargo.toml +++ b/src/storage/compactor/Cargo.toml @@ -15,20 +15,18 @@ ignored = ["workspace-hack"] normal = ["workspace-hack"] [dependencies] -anyhow = "1" async-trait = "0.1" await-tree = { workspace = true } clap = { version = "4", features = ["derive"] } parking_lot = "0.12" -prometheus = { version = "0.13" } risingwave_common = { workspace = true } +risingwave_common_heap_profiling = { workspace = true } risingwave_common_service = { workspace = true } risingwave_object_store = { workspace = true } risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } risingwave_storage = { workspace = true } serde = { version = "1", features = ["derive"] } -serde_json = "1" tokio = { version = "0.2", package = "madsim-tokio", features = [ "fs", "rt", diff --git a/src/storage/compactor/src/lib.rs b/src/storage/compactor/src/lib.rs index 8043cb5d2214d..b269b2aec73d8 100644 --- a/src/storage/compactor/src/lib.rs +++ b/src/storage/compactor/src/lib.rs @@ -14,13 +14,15 @@ mod compactor_observer; mod rpc; -mod server; +pub mod server; mod telemetry; use clap::Parser; -use risingwave_common::config::{AsyncStackTraceOption, MetricLevel, OverrideConfig}; +use risingwave_common::config::{ + AsyncStackTraceOption, CompactorMode, MetricLevel, OverrideConfig, +}; -use crate::server::compactor_serve; +use crate::server::{compactor_serve, shared_compactor_serve}; /// Command-line arguments for compactor-node. 
#[derive(Parser, Clone, Debug, OverrideConfig)] @@ -75,6 +77,11 @@ pub struct CompactorOpts { #[override_opts(path = streaming.async_stack_trace)] pub async_stack_trace: Option, + /// Enable heap profile dump when memory usage is high. + #[clap(long, env = "RW_HEAP_PROFILING_DIR")] + #[override_opts(path = server.heap_profiling.dir)] + pub heap_profiling_dir: Option, + #[clap(long, env = "RW_OBJECT_STORE_STREAMING_READ_TIMEOUT_MS", value_enum)] #[override_opts(path = storage.object_store_streaming_read_timeout_ms)] pub object_store_streaming_read_timeout_ms: Option, @@ -87,6 +94,12 @@ pub struct CompactorOpts { #[clap(long, env = "RW_OBJECT_STORE_READ_TIMEOUT_MS", value_enum)] #[override_opts(path = storage.object_store_read_timeout_ms)] pub object_store_read_timeout_ms: Option, + + #[clap(long, env = "RW_COMPACTOR_MODE", value_enum)] + pub compactor_mode: Option, + + #[clap(long, env = "RW_PROXY_RPC_ENDPOINT", default_value = "")] + pub proxy_rpc_endpoint: String, } use std::future::Future; @@ -95,28 +108,42 @@ use std::pin::Pin; pub fn start(opts: CompactorOpts) -> Pin + Send>> { // WARNING: don't change the function signature. Making it `async fn` will cause // slow compile in release mode. 
- Box::pin(async move { - tracing::info!("Compactor node options: {:?}", opts); - tracing::info!("meta address: {}", opts.meta_address.clone()); - - let listen_addr = opts.listen_addr.parse().unwrap(); - tracing::info!("Server Listening at {}", listen_addr); - - let advertise_addr = opts - .advertise_addr - .as_ref() - .unwrap_or_else(|| { - tracing::warn!("advertise addr is not specified, defaulting to listen address"); - &opts.listen_addr - }) - .parse() - .unwrap(); - tracing::info!(" address is {}", advertise_addr); - - let (join_handle, observer_join_handle, _shutdown_sender) = - compactor_serve(listen_addr, advertise_addr, opts).await; - - join_handle.await.unwrap(); - observer_join_handle.abort(); - }) + match opts.compactor_mode { + Some(CompactorMode::Shared) => Box::pin(async move { + tracing::info!("Shared compactor pod options: {:?}", opts); + tracing::info!("Proxy rpc endpoint: {}", opts.proxy_rpc_endpoint.clone()); + + let listen_addr = opts.listen_addr.parse().unwrap(); + + let (join_handle, _shutdown_sender) = shared_compactor_serve(listen_addr, opts).await; + + tracing::info!("Server listening at {}", listen_addr); + + join_handle.await.unwrap(); + }), + None | Some(CompactorMode::Dedicated) => Box::pin(async move { + tracing::info!("Compactor node options: {:?}", opts); + tracing::info!("meta address: {}", opts.meta_address.clone()); + + let listen_addr = opts.listen_addr.parse().unwrap(); + + let advertise_addr = opts + .advertise_addr + .as_ref() + .unwrap_or_else(|| { + tracing::warn!("advertise addr is not specified, defaulting to listen address"); + &opts.listen_addr + }) + .parse() + .unwrap(); + tracing::info!(" address is {}", advertise_addr); + let (join_handle, observer_join_handle, _shutdown_sender) = + compactor_serve(listen_addr, advertise_addr, opts).await; + + tracing::info!("Server listening at {}", listen_addr); + + join_handle.await.unwrap(); + observer_join_handle.abort(); + }), + } } diff --git 
a/src/storage/compactor/src/rpc.rs b/src/storage/compactor/src/rpc.rs index 80f146d30f5ed..2182d47af8642 100644 --- a/src/storage/compactor/src/rpc.rs +++ b/src/storage/compactor/src/rpc.rs @@ -12,27 +12,59 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; use std::sync::Arc; use parking_lot::RwLock; use risingwave_pb::compactor::compactor_service_server::CompactorService; -use risingwave_pb::compactor::{EchoRequest, EchoResponse}; +use risingwave_pb::compactor::{ + DispatchCompactionTaskRequest, DispatchCompactionTaskResponse, EchoRequest, EchoResponse, +}; use risingwave_pb::monitor_service::monitor_service_server::MonitorService; use risingwave_pb::monitor_service::{ - HeapProfilingRequest, HeapProfilingResponse, ProfilingRequest, ProfilingResponse, + AnalyzeHeapRequest, AnalyzeHeapResponse, HeapProfilingRequest, HeapProfilingResponse, + ListHeapProfilingRequest, ListHeapProfilingResponse, ProfilingRequest, ProfilingResponse, StackTraceRequest, StackTraceResponse, }; +use tokio::sync::mpsc; use tonic::{Request, Response, Status}; #[derive(Default)] -pub struct CompactorServiceImpl {} - +pub struct CompactorServiceImpl { + sender: Option>>, +} +impl CompactorServiceImpl { + pub fn new(sender: mpsc::UnboundedSender>) -> Self { + Self { + sender: Some(sender), + } + } +} #[async_trait::async_trait] impl CompactorService for CompactorServiceImpl { async fn echo(&self, _request: Request) -> Result, Status> { Ok(Response::new(EchoResponse {})) } + + async fn dispatch_compaction_task( + &self, + request: Request, + ) -> Result, Status> { + match &self.sender.as_ref() { + Some(sender) => { + sender + .send(request) + .expect("DispatchCompactionTaskRequest should be able to send"); + } + None => { + tracing::error!( + "fail to send DispatchCompactionTaskRequest, sender has not been initialized." 
+ ); + } + } + Ok(Response::new(DispatchCompactionTaskResponse { + status: None, + })) + } } pub struct MonitorServiceImpl { @@ -52,7 +84,7 @@ impl MonitorService for MonitorServiceImpl { _request: Request, ) -> Result, Status> { let compaction_task_traces = match &self.await_tree_reg { - None => HashMap::default(), + None => Default::default(), Some(await_tree_reg) => await_tree_reg .read() .iter() @@ -83,4 +115,22 @@ impl MonitorService for MonitorServiceImpl { "Heap profiling unimplemented in compactor", )) } + + async fn list_heap_profiling( + &self, + _request: Request, + ) -> Result, Status> { + Err(Status::unimplemented( + "Heap profiling unimplemented in compactor", + )) + } + + async fn analyze_heap( + &self, + _request: Request, + ) -> Result, Status> { + Err(Status::unimplemented( + "Heap profiling unimplemented in compactor", + )) + } } diff --git a/src/storage/compactor/src/server.rs b/src/storage/compactor/src/server.rs index dd953b87c7af9..3ad23bf68cc3b 100644 --- a/src/storage/compactor/src/server.rs +++ b/src/storage/compactor/src/server.rs @@ -19,15 +19,17 @@ use std::time::Duration; use parking_lot::RwLock; use risingwave_common::config::{ - extract_storage_memory_config, load_config, AsyncStackTraceOption, MetricLevel, + extract_storage_memory_config, load_config, AsyncStackTraceOption, MetricLevel, RwConfig, }; use risingwave_common::monitor::connection::{RouterExt, TcpConfig}; use risingwave_common::system_param::local_manager::LocalSystemParamsManager; +use risingwave_common::system_param::reader::SystemParamsReader; use risingwave_common::telemetry::manager::TelemetryManager; use risingwave_common::telemetry::telemetry_env_enabled; use risingwave_common::util::addr::HostAddr; use risingwave_common::util::resource_util; use risingwave_common::{GIT_SHA, RW_VERSION}; +use risingwave_common_heap_profiling::HeapProfiler; use risingwave_common_service::metrics_manager::MetricsManager; use 
risingwave_common_service::observer_manager::ObserverManager; use risingwave_object_store::object::object_metrics::GLOBAL_OBJECT_STORE_METRICS; @@ -35,7 +37,7 @@ use risingwave_object_store::object::parse_remote_object_store; use risingwave_pb::common::WorkerType; use risingwave_pb::compactor::compactor_service_server::CompactorServiceServer; use risingwave_pb::monitor_service::monitor_service_server::MonitorServiceServer; -use risingwave_rpc_client::MetaClient; +use risingwave_rpc_client::{GrpcCompactorProxyClient, MetaClient}; use risingwave_storage::filter_key_extractor::{ FilterKeyExtractorManager, RemoteTableAccessor, RpcFilterKeyExtractorManager, }; @@ -45,11 +47,13 @@ use risingwave_storage::hummock::{ HummockMemoryCollector, MemoryLimiter, SstableObjectIdManager, SstableStore, }; use risingwave_storage::monitor::{ - monitor_cache, GLOBAL_COMPACTOR_METRICS, GLOBAL_HUMMOCK_METRICS, + monitor_cache, CompactorMetrics, GLOBAL_COMPACTOR_METRICS, GLOBAL_HUMMOCK_METRICS, }; use risingwave_storage::opts::StorageOpts; +use tokio::sync::mpsc; use tokio::sync::oneshot::Sender; use tokio::task::JoinHandle; +use tonic::transport::Endpoint; use tracing::info; use super::compactor_observer::observer_manager::CompactorObserverNode; @@ -57,47 +61,24 @@ use crate::rpc::{CompactorServiceImpl, MonitorServiceImpl}; use crate::telemetry::CompactorTelemetryCreator; use crate::CompactorOpts; -/// Fetches and runs compaction tasks. -pub async fn compactor_serve( - listen_addr: SocketAddr, - advertise_addr: HostAddr, - opts: CompactorOpts, -) -> (JoinHandle<()>, JoinHandle<()>, Sender<()>) { - type CompactorMemoryCollector = HummockMemoryCollector; - - let config = load_config(&opts.config_path, &opts); - info!("Starting compactor node",); - info!("> config: {:?}", config); - info!( - "> debug assertions: {}", - if cfg!(debug_assertions) { "on" } else { "off" } - ); - info!("> version: {} ({})", RW_VERSION, GIT_SHA); - - // Register to the cluster. 
- let (meta_client, system_params_reader) = MetaClient::register_new( - &opts.meta_address, - WorkerType::Compactor, - &advertise_addr, - Default::default(), - &config.meta, - ) - .await - .unwrap(); - - info!("Assigned compactor id {}", meta_client.worker_id()); - meta_client.activate(&advertise_addr).await.unwrap(); - +const ENDPOINT_KEEP_ALIVE_INTERVAL_SEC: u64 = 60; +// See `Endpoint::keep_alive_timeout` +const ENDPOINT_KEEP_ALIVE_TIMEOUT_SEC: u64 = 60; +pub async fn prepare_start_parameters( + config: RwConfig, + system_params_reader: SystemParamsReader, +) -> ( + Arc, + Arc, + HeapProfiler, + Option>>>, + Arc, + Arc, +) { // Boot compactor let object_metrics = Arc::new(GLOBAL_OBJECT_STORE_METRICS.clone()); - let hummock_metrics = Arc::new(GLOBAL_HUMMOCK_METRICS.clone()); let compactor_metrics = Arc::new(GLOBAL_COMPACTOR_METRICS.clone()); - let hummock_meta_client = Arc::new(MonitoredHummockMetaClient::new( - meta_client.clone(), - hummock_metrics.clone(), - )); - let state_store_url = system_params_reader.state_store(); let storage_memory_config = extract_storage_memory_config(&config); @@ -106,9 +87,8 @@ pub async fn compactor_serve( &system_params_reader, &storage_memory_config, ))); - let total_memory_available_bytes = - (resource_util::memory::total_memory_available_bytes() as f64 + (resource_util::memory::system_memory_available_bytes() as f64 * config.storage.compactor_memory_available_proportion) as usize; let meta_cache_capacity_bytes = storage_opts.meta_cache_capacity_mb * (1 << 20); let compactor_memory_limit_bytes = match config.storage.compactor_memory_limit_mb { @@ -156,6 +136,86 @@ pub async fn compactor_serve( meta_cache_capacity_bytes, )); + let memory_limiter = Arc::new(MemoryLimiter::new(compactor_memory_limit_bytes)); + let storage_memory_config = extract_storage_memory_config(&config); + let memory_collector: Arc = Arc::new(HummockMemoryCollector::new( + sstable_store.clone(), + memory_limiter.clone(), + storage_memory_config, + )); + + 
let heap_profiler = HeapProfiler::new( + total_memory_available_bytes, + config.server.heap_profiling.clone(), + ); + + monitor_cache(memory_collector); + + let await_tree_config = match &config.streaming.async_stack_trace { + AsyncStackTraceOption::Off => None, + c => await_tree::ConfigBuilder::default() + .verbose(c.is_verbose().unwrap()) + .build() + .ok(), + }; + let await_tree_reg = + await_tree_config.map(|c| Arc::new(RwLock::new(await_tree::Registry::new(c)))); + + ( + sstable_store, + memory_limiter, + heap_profiler, + await_tree_reg, + storage_opts, + compactor_metrics, + ) +} + +/// Fetches and runs compaction tasks. +pub async fn compactor_serve( + listen_addr: SocketAddr, + advertise_addr: HostAddr, + opts: CompactorOpts, +) -> (JoinHandle<()>, JoinHandle<()>, Sender<()>) { + let config = load_config(&opts.config_path, &opts); + info!("Starting compactor node",); + info!("> config: {:?}", config); + info!( + "> debug assertions: {}", + if cfg!(debug_assertions) { "on" } else { "off" } + ); + info!("> version: {} ({})", RW_VERSION, GIT_SHA); + + // Register to the cluster. 
+ let (meta_client, system_params_reader) = MetaClient::register_new( + &opts.meta_address, + WorkerType::Compactor, + &advertise_addr, + Default::default(), + &config.meta, + ) + .await + .unwrap(); + + info!("Assigned compactor id {}", meta_client.worker_id()); + meta_client.activate(&advertise_addr).await.unwrap(); + + let hummock_metrics = Arc::new(GLOBAL_HUMMOCK_METRICS.clone()); + + let hummock_meta_client = Arc::new(MonitoredHummockMetaClient::new( + meta_client.clone(), + hummock_metrics.clone(), + )); + + let ( + sstable_store, + memory_limiter, + heap_profiler, + await_tree_reg, + storage_opts, + compactor_metrics, + ) = prepare_start_parameters(config.clone(), system_params_reader.clone()).await; + let filter_key_extractor_manager = Arc::new(RpcFilterKeyExtractorManager::new(Box::new( RemoteTableAccessor::new(meta_client.clone()), ))); @@ -167,31 +227,20 @@ pub async fn compactor_serve( let observer_manager = ObserverManager::new_with_meta_client(meta_client.clone(), compactor_observer_node).await; + // Run a background heap profiler + heap_profiler.start(); + // use half of limit because any memory which would hold in meta-cache will be allocate by // limited at first. 
let observer_join_handle = observer_manager.start().await; - let memory_limiter = Arc::new(MemoryLimiter::new(compactor_memory_limit_bytes)); - let memory_collector = Arc::new(CompactorMemoryCollector::new( - sstable_store.clone(), - memory_limiter.clone(), - storage_memory_config, - )); - - monitor_cache(memory_collector); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage_opts.sstable_id_remote_fetch_number, )); - let await_tree_config = match &config.streaming.async_stack_trace { - AsyncStackTraceOption::Off => None, - c => await_tree::ConfigBuilder::default() - .verbose(c.is_verbose().unwrap()) - .build() - .ok(), - }; - let await_tree_reg = - await_tree_config.map(|c| Arc::new(RwLock::new(await_tree::Registry::new(c)))); + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( + filter_key_extractor_manager.clone(), + ); let compactor_context = CompactorContext { storage_opts, sstable_store: sstable_store.clone(), @@ -200,9 +249,6 @@ pub async fn compactor_serve( compaction_executor: Arc::new(CompactionExecutor::new( opts.compaction_worker_threads_number, )), - filter_key_extractor_manager: FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager.clone(), - ), memory_limiter, task_progress_manager: Default::default(), @@ -219,6 +265,7 @@ pub async fn compactor_serve( compactor_context.clone(), hummock_meta_client.clone(), sstable_object_id_manager.clone(), + filter_key_extractor_manager.clone(), ), ]; @@ -275,3 +322,105 @@ pub async fn compactor_serve( (join_handle, observer_join_handle, shutdown_send) } + +pub async fn shared_compactor_serve( + listen_addr: SocketAddr, + opts: CompactorOpts, +) -> (JoinHandle<()>, Sender<()>) { + let config = load_config(&opts.config_path, &opts); + info!("Starting shared compactor node",); + info!("> config: {:?}", config); + info!( + "> debug assertions: {}", + if cfg!(debug_assertions) { "on" } else { 
"off" } + ); + info!("> version: {} ({})", RW_VERSION, GIT_SHA); + + let endpoint_str = opts.proxy_rpc_endpoint.clone().to_string(); + let endpoint = + Endpoint::from_shared(opts.proxy_rpc_endpoint).expect("Fail to construct tonic Endpoint"); + let channel = endpoint + .http2_keep_alive_interval(Duration::from_secs(ENDPOINT_KEEP_ALIVE_INTERVAL_SEC)) + .keep_alive_timeout(Duration::from_secs(ENDPOINT_KEEP_ALIVE_TIMEOUT_SEC)) + .connect_timeout(Duration::from_secs(5)) + .connect() + .await + .expect("Failed to create channel via proxy rpc endpoint."); + let grpc_proxy_client = GrpcCompactorProxyClient::new(channel, endpoint_str); + let system_params_response = grpc_proxy_client + .get_system_params() + .await + .expect("Fail to get system params, the compactor pod cannot be started."); + let system_params = system_params_response.into_inner().params.unwrap(); + + let ( + sstable_store, + memory_limiter, + heap_profiler, + await_tree_reg, + storage_opts, + compactor_metrics, + ) = prepare_start_parameters(config.clone(), system_params.into()).await; + let (sender, receiver) = mpsc::unbounded_channel(); + let compactor_srv: CompactorServiceImpl = CompactorServiceImpl::new(sender); + + let monitor_srv = MonitorServiceImpl::new(await_tree_reg.clone()); + + // Run a background heap profiler + heap_profiler.start(); + + let (shutdown_send, mut shutdown_recv) = tokio::sync::oneshot::channel(); + let compactor_context = CompactorContext { + storage_opts, + sstable_store, + compactor_metrics, + is_share_buffer_compact: false, + compaction_executor: Arc::new(CompactionExecutor::new( + opts.compaction_worker_threads_number, + )), + memory_limiter, + task_progress_manager: Default::default(), + await_tree_reg, + running_task_count: Arc::new(AtomicU32::new(0)), + }; + let join_handle = tokio::spawn(async move { + tonic::transport::Server::builder() + .add_service(CompactorServiceServer::new(compactor_srv)) + .add_service(MonitorServiceServer::new(monitor_srv)) + 
.monitored_serve_with_shutdown( + listen_addr, + "grpc-compactor-node-service", + TcpConfig { + tcp_nodelay: true, + keepalive_duration: None, + }, + async move { + let (join_handle, shutdown_sender) = + risingwave_storage::hummock::compactor::start_shared_compactor( + grpc_proxy_client, + receiver, + compactor_context, + ); + tokio::select! { + _ = tokio::signal::ctrl_c() => {}, + _ = &mut shutdown_recv => { + if let Err(err) = shutdown_sender.send(()) { + tracing::warn!("Failed to send shutdown: {:?}", err); + } + if let Err(err) = join_handle.await { + tracing::warn!("Failed to join shutdown: {:?}", err); + } + }, + } + }, + ) + .await + }); + + // Boot metrics service. + if config.server.metrics_level > MetricLevel::Disabled { + MetricsManager::boot_metrics_service(opts.prometheus_listener_addr.clone()); + } + + (join_handle, shutdown_send) +} diff --git a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs index 1193877a14c9b..3e4286eb856bc 100644 --- a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs +++ b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs @@ -937,7 +937,15 @@ pub fn add_ssts_to_sub_level( let b = sst2.key_range.as_ref().unwrap(); a.compare(b) }); - assert!(can_concat(&l0.sub_levels[sub_level_idx].table_infos)); + assert!( + can_concat(&l0.sub_levels[sub_level_idx].table_infos), + "sstable ids: {:?}", + l0.sub_levels[sub_level_idx] + .table_infos + .iter() + .map(|sst| sst.sst_id) + .collect_vec() + ); } } @@ -1039,7 +1047,15 @@ fn level_insert_ssts(operand: &mut Level, insert_table_infos: Vec) if operand.level_type == LevelType::Overlapping as i32 { operand.level_type = LevelType::Nonoverlapping as i32; } - assert!(can_concat(&operand.table_infos)); + assert!( + can_concat(&operand.table_infos), + "sstable ids: {:?}", + operand + .table_infos + .iter() + .map(|sst| sst.sst_id) + .collect_vec() + ); } pub fn 
object_size_map(version: &HummockVersion) -> HashMap { diff --git a/src/storage/hummock_sdk/src/key.rs b/src/storage/hummock_sdk/src/key.rs index f58f32dd9aac9..d635bb4518a4d 100644 --- a/src/storage/hummock_sdk/src/key.rs +++ b/src/storage/hummock_sdk/src/key.rs @@ -20,6 +20,7 @@ use std::ptr; use bytes::{Buf, BufMut, Bytes, BytesMut}; use risingwave_common::catalog::TableId; +use risingwave_common::estimate_size::EstimateSize; use risingwave_common::hash::VirtualNode; use crate::HummockEpoch; @@ -411,6 +412,12 @@ impl> TableKey { } } +impl EstimateSize for TableKey { + fn estimated_heap_size(&self) -> usize { + self.0.estimated_heap_size() + } +} + #[inline] pub fn map_table_key_range(range: (Bound, Bound)) -> TableKeyRange { (range.0.map(TableKey), range.1.map(TableKey)) diff --git a/src/storage/hummock_sdk/src/key_range.rs b/src/storage/hummock_sdk/src/key_range.rs index d54728b329b77..ff1c6b9960d60 100644 --- a/src/storage/hummock_sdk/src/key_range.rs +++ b/src/storage/hummock_sdk/src/key_range.rs @@ -19,7 +19,7 @@ use bytes::Bytes; use super::key_cmp::KeyComparator; use crate::key::{FullKey, UserKey}; -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Default)] pub struct KeyRange { pub left: Bytes, pub right: Bytes, diff --git a/src/storage/hummock_test/Cargo.toml b/src/storage/hummock_test/Cargo.toml index 600a5249ddf1b..8abf2f45e6855 100644 --- a/src/storage/hummock_test/Cargo.toml +++ b/src/storage/hummock_test/Cargo.toml @@ -20,7 +20,7 @@ bytes = { version = "1" } clap = { version = "4", features = ["derive"] } fail = "0.5" futures = { version = "0.3", default-features = false, features = ["alloc"] } -futures-async-stream = "0.2" +futures-async-stream = "0.2.9" itertools = "0.11" parking_lot = "0.12" rand = "0.8" @@ -47,7 +47,7 @@ futures = { version = "0.3", default-features = false, features = [ "executor", ] } -futures-async-stream = "0.2" +futures-async-stream = "0.2.9" risingwave_test_runner = { workspace = true } 
serial_test = "2.0" sync-point = { path = "../../utils/sync-point" } diff --git a/src/storage/hummock_test/benches/bench_hummock_iter.rs b/src/storage/hummock_test/benches/bench_hummock_iter.rs index 3bd6738f9f9a2..1c6294fc672be 100644 --- a/src/storage/hummock_test/benches/bench_hummock_iter.rs +++ b/src/storage/hummock_test/benches/bench_hummock_iter.rs @@ -19,6 +19,7 @@ use bytes::Bytes; use criterion::{criterion_group, criterion_main, Criterion}; use futures::{pin_mut, TryStreamExt}; use risingwave_common::cache::CachePriority; +use risingwave_hummock_sdk::key::TableKey; use risingwave_hummock_test::get_notification_client_for_test; use risingwave_hummock_test::local_state_store_test_utils::LocalStateStoreTestExt; use risingwave_hummock_test::test_utils::TestIngestBatch; @@ -34,13 +35,15 @@ use risingwave_storage::StateStore; fn gen_interleave_shared_buffer_batch_iter( batch_size: usize, batch_count: usize, -) -> Vec> { +) -> Vec, StorageValue)>> { let mut ret = Vec::new(); for i in 0..batch_count { let mut batch_data = vec![]; for j in 0..batch_size { batch_data.push(( - Bytes::copy_from_slice(format!("test_key_{:08}", j * batch_count + i).as_bytes()), + TableKey(Bytes::copy_from_slice( + format!("test_key_{:08}", j * batch_count + i).as_bytes(), + )), StorageValue::new_put(Bytes::copy_from_slice("value".as_bytes())), )); } diff --git a/src/storage/hummock_test/src/bin/replay/main.rs b/src/storage/hummock_test/src/bin/replay/main.rs index 7a000c914e3a9..ae6038d8b5d16 100644 --- a/src/storage/hummock_test/src/bin/replay/main.rs +++ b/src/storage/hummock_test/src/bin/replay/main.rs @@ -13,7 +13,7 @@ // limitations under the License. 
#![feature(bound_map)] -#![feature(generators)] +#![feature(coroutines)] #![feature(stmt_expr_attributes)] #![feature(proc_macro_hygiene)] @@ -111,6 +111,7 @@ async fn create_replay_hummock(r: Record, args: &Args) -> Result Result>> { let key_range = ( - key_range.0.map(TracedBytes::into), - key_range.1.map(TracedBytes::into), + key_range.0.map(TracedBytes::into).map(TableKey), + key_range.1.map(TracedBytes::into).map(TableKey), ); let iter = self @@ -129,7 +130,7 @@ impl ReplayRead for GlobalReplayImpl { ) -> Result> { Ok(self .store - .get(key.into(), epoch, read_options.into()) + .get(TableKey(key.into()), epoch, read_options.into()) .await .unwrap() .map(TracedBytes::from)) @@ -240,7 +241,10 @@ impl LocalReplayRead for LocalReplayImpl { key_range: (Bound, Bound), read_options: TracedReadOptions, ) -> Result>> { - let key_range = (key_range.0.map(|b| b.into()), key_range.1.map(|b| b.into())); + let key_range = ( + key_range.0.map(|b| TableKey(b.into())), + key_range.1.map(|b| TableKey(b.into())), + ); let iter = LocalStateStore::iter(&self.0, key_range, read_options.into()) .await @@ -257,7 +261,7 @@ impl LocalReplayRead for LocalReplayImpl { read_options: TracedReadOptions, ) -> Result> { Ok( - LocalStateStore::get(&self.0, key.into(), read_options.into()) + LocalStateStore::get(&self.0, TableKey(key.into()), read_options.into()) .await .unwrap() .map(TracedBytes::from), @@ -275,7 +279,7 @@ impl ReplayWrite for LocalReplayImpl { ) -> Result<()> { LocalStateStore::insert( &mut self.0, - key.into(), + TableKey(key.into()), new_val.into(), old_val.map(|b| b.into()), ) @@ -284,7 +288,7 @@ impl ReplayWrite for LocalReplayImpl { } fn delete(&mut self, key: TracedBytes, old_val: TracedBytes) -> Result<()> { - LocalStateStore::delete(&mut self.0, key.into(), old_val.into()).unwrap(); + LocalStateStore::delete(&mut self.0, TableKey(key.into()), old_val.into()).unwrap(); Ok(()) } } diff --git a/src/storage/hummock_test/src/compactor_tests.rs 
b/src/storage/hummock_test/src/compactor_tests.rs index ca5418cbe6a0f..50d739c5d1eb9 100644 --- a/src/storage/hummock_test/src/compactor_tests.rs +++ b/src/storage/hummock_test/src/compactor_tests.rs @@ -15,44 +15,56 @@ #[cfg(test)] pub(crate) mod tests { - use std::collections::{BTreeSet, HashMap}; + use std::collections::{BTreeSet, HashMap, VecDeque}; use std::ops::Bound; use std::sync::atomic::AtomicU32; use std::sync::Arc; use bytes::{BufMut, Bytes, BytesMut}; use itertools::Itertools; - use rand::Rng; + use rand::{Rng, RngCore, SeedableRng}; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::TableId; use risingwave_common::constants::hummock::CompactionFilterFlag; use risingwave_common::util::epoch::Epoch; use risingwave_common_service::observer_manager::NotificationClient; + use risingwave_hummock_sdk::can_concat; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::HummockVersionExt; use risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; - use risingwave_hummock_sdk::key::{next_key, TABLE_PREFIX_LEN}; + use risingwave_hummock_sdk::key::{next_key, FullKey, TableKey, TABLE_PREFIX_LEN}; + use risingwave_hummock_sdk::prost_key_range::KeyRangeExt; use risingwave_hummock_sdk::table_stats::to_prost_table_stats_map; use risingwave_meta::hummock::compaction::compaction_config::CompactionConfigBuilder; - use risingwave_meta::hummock::compaction::{default_level_selector, ManualCompactionOption}; + use risingwave_meta::hummock::compaction::selector::{ + default_compaction_selector, ManualCompactionOption, + }; use risingwave_meta::hummock::test_utils::{ register_table_ids_to_compaction_group, setup_compute_env, setup_compute_env_with_config, unregister_table_ids_from_compaction_group, }; use risingwave_meta::hummock::{HummockManagerRef, MockHummockMetaClient}; use risingwave_pb::common::{HostAddress, WorkerType}; - use risingwave_pb::hummock::{HummockVersion, TableOption}; + use 
risingwave_pb::hummock::{CompactTask, HummockVersion, InputLevel, KeyRange, TableOption}; use risingwave_pb::meta::add_worker_node_request::Property; use risingwave_rpc_client::HummockMetaClient; use risingwave_storage::filter_key_extractor::{ - FilterKeyExtractorImpl, FilterKeyExtractorManager, FilterKeyExtractorManagerRef, - FixedLengthFilterKeyExtractor, FullKeyFilterKeyExtractor, + FilterKeyExtractorImpl, FilterKeyExtractorManager, FixedLengthFilterKeyExtractor, + FullKeyFilterKeyExtractor, + }; + use risingwave_storage::hummock::compactor::compactor_runner::{compact, CompactorRunner}; + use risingwave_storage::hummock::compactor::fast_compactor_runner::CompactorRunner as FastCompactorRunner; + use risingwave_storage::hummock::compactor::{ + CompactionExecutor, CompactorContext, DummyCompactionFilter, TaskProgress, }; - use risingwave_storage::hummock::compactor::compactor_runner::compact; - use risingwave_storage::hummock::compactor::{CompactionExecutor, CompactorContext}; use risingwave_storage::hummock::iterator::test_utils::mock_sstable_store; + use risingwave_storage::hummock::iterator::{ConcatIterator, UserIterator}; use risingwave_storage::hummock::sstable_store::SstableStoreRef; + use risingwave_storage::hummock::test_utils::gen_test_sstable_info; + use risingwave_storage::hummock::value::HummockValue; use risingwave_storage::hummock::{ - CachePolicy, HummockStorage as GlobalHummockStorage, HummockStorage, MemoryLimiter, + CachePolicy, CompactionDeleteRanges, CompressionAlgorithm, + HummockStorage as GlobalHummockStorage, HummockStorage, MemoryLimiter, + SharedComapctorObjectIdManager, Sstable, SstableBuilderOptions, SstableIteratorReadOptions, SstableObjectIdManager, }; use risingwave_storage::monitor::{CompactorMetrics, StoreLocalStatistic}; @@ -141,7 +153,10 @@ pub(crate) mod tests { new_val.extend_from_slice(&epoch.to_be_bytes()); local .ingest_batch( - vec![(key.clone(), StorageValue::new_put(Bytes::from(new_val)))], + vec![( + 
TableKey(key.clone()), + StorageValue::new_put(Bytes::from(new_val)), + )], vec![], WriteOptions { epoch, @@ -160,25 +175,18 @@ pub(crate) mod tests { .await .unwrap() .uncommitted_ssts; + hummock_meta_client.commit_epoch(epoch, ssts).await.unwrap(); } } - fn get_compactor_context_with_filter_key_extractor_manager( - storage: &HummockStorage, - filter_key_extractor_manager: FilterKeyExtractorManagerRef, - ) -> CompactorContext { - get_compactor_context_with_filter_key_extractor_manager_impl( - storage.storage_opts().clone(), - storage.sstable_store(), - filter_key_extractor_manager, - ) + fn get_compactor_context(storage: &HummockStorage) -> CompactorContext { + get_compactor_context_impl(storage.storage_opts().clone(), storage.sstable_store()) } - fn get_compactor_context_with_filter_key_extractor_manager_impl( + fn get_compactor_context_impl( options: Arc, sstable_store: SstableStoreRef, - filter_key_extractor_manager: FilterKeyExtractorManagerRef, ) -> CompactorContext { CompactorContext { storage_opts: options, @@ -187,9 +195,6 @@ pub(crate) mod tests { is_share_buffer_compact: false, compaction_executor: Arc::new(CompactionExecutor::new(Some(1))), memory_limiter: MemoryLimiter::unlimit(), - filter_key_extractor_manager: FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager, - ), task_progress_manager: Default::default(), await_tree_reg: None, running_task_count: Arc::new(AtomicU32::new(0)), @@ -221,7 +226,6 @@ pub(crate) mod tests { Default::default(), ) .await; - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() { FilterKeyExtractorManager::RpcFilterKeyExtractorManager( @@ -229,10 +233,10 @@ pub(crate) mod tests { ) => rpc_filter_key_extractor_manager, FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), }; - let compact_ctx = get_compactor_context_with_filter_key_extractor_manager( - &storage, + let filter_key_extractor_manager = 
FilterKeyExtractorManager::RpcFilterKeyExtractorManager( rpc_filter_key_extractor_manager, ); + let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -268,7 +272,7 @@ pub(crate) mod tests { while let Some(mut compact_task) = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() @@ -285,16 +289,23 @@ pub(crate) mod tests { compact_task.current_epoch_time = 0; let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx.clone(), compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager.clone(), ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task_for_test( + result_task.task_id, + Some(compact_task), + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); } @@ -343,7 +354,7 @@ pub(crate) mod tests { let get_ret = storage .get( - key.clone(), + TableKey(key.clone()), read_epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -356,7 +367,7 @@ pub(crate) mod tests { assert_eq!(get_val, val); let ret = storage .get( - key.clone(), + TableKey(key.clone()), ((TEST_WATERMARK - 1) * 1000) << 16, ReadOptions { prefix_hint: Some(key.clone()), @@ -392,10 +403,10 @@ pub(crate) mod tests { ) => rpc_filter_key_extractor_manager, FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), }; - let compact_ctx = get_compactor_context_with_filter_key_extractor_manager( - &storage, + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( rpc_filter_key_extractor_manager, ); + let compact_ctx 
= get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -422,73 +433,65 @@ pub(crate) mod tests { .await; // 2. get compact task - let mut compact_task = hummock_manager_ref + + // 3. compact + while let Some(compact_task) = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap() - .unwrap(); - let compaction_filter_flag = CompactionFilterFlag::NONE; - compact_task.compaction_filter_mask = compaction_filter_flag.bits(); - compact_task.current_epoch_time = 0; - - // assert compact_task - assert_eq!( - compact_task - .input_ssts - .iter() - .map(|level| level.table_infos.len()) - .sum::(), - SST_COUNT as usize / 2 + 1, - ); - compact_task.target_level = 6; - - // 3. compact - let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( - compact_ctx, - compact_task.clone(), - rx, - Box::new(sstable_object_id_manager.clone()), - ) - .await; + { + // 3. compact + let (_tx, rx) = tokio::sync::oneshot::channel(); + let (result_task, task_stats) = compact( + compact_ctx.clone(), + compact_task.clone(), + rx, + Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager.clone(), + ) + .await; - hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) - .await - .unwrap(); + hummock_manager_ref + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) + .await + .unwrap(); + } // 4. 
get the latest version and check let version = hummock_manager_ref.get_current_version().await; - let output_table = version + let output_tables = version .get_compaction_group_levels(StaticCompactionGroupId::StateDefault.into()) .levels - .last() - .unwrap() - .table_infos - .first() - .unwrap(); - let table = storage - .sstable_store() - .sstable(output_table, &mut StoreLocalStatistic::default()) - .await - .unwrap(); - let target_table_size = storage.storage_opts().sstable_size_mb * (1 << 20); - - assert!( - table.value().meta.estimated_size > target_table_size, - "table.meta.estimated_size {} <= target_table_size {}", - table.value().meta.estimated_size, - target_table_size - ); - + .iter() + .flat_map(|level| level.table_infos.clone()) + .collect_vec(); + for output_table in &output_tables { + let table = storage + .sstable_store() + .sstable(output_table, &mut StoreLocalStatistic::default()) + .await + .unwrap(); + let target_table_size = storage.storage_opts().sstable_size_mb * (1 << 20); + assert!( + table.value().meta.estimated_size > target_table_size, + "table.meta.estimated_size {} <= target_table_size {}", + table.value().meta.estimated_size, + target_table_size + ); + } // 5. 
storage get back the correct kv after compaction storage.wait_version(version).await; let get_val = storage .get( - key.clone(), + TableKey(key.clone()), SST_COUNT + 1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -541,7 +544,9 @@ pub(crate) mod tests { let mut key = idx.to_be_bytes().to_vec(); let ramdom_key = rand::thread_rng().gen::<[u8; 32]>(); key.extend_from_slice(&ramdom_key); - local.insert(Bytes::from(key), val.clone(), None).unwrap(); + local + .insert(TableKey(Bytes::from(key)), val.clone(), None) + .unwrap(); } local.flush(Vec::new()).await.unwrap(); local.seal_current_epoch(epoch + 1); @@ -550,10 +555,10 @@ pub(crate) mod tests { } } - pub(crate) fn prepare_compactor_and_filter( + pub fn prepare_compactor_and_filter( storage: &HummockStorage, existing_table_id: u32, - ) -> CompactorContext { + ) -> (CompactorContext, FilterKeyExtractorManager) { let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() { FilterKeyExtractorManager::RpcFilterKeyExtractorManager( @@ -566,10 +571,11 @@ pub(crate) mod tests { Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), ); - get_compactor_context_with_filter_key_extractor_manager( - storage, + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( rpc_filter_key_extractor_manager, - ) + ); + + (get_compactor_context(storage), filter_key_extractor_manager) } #[tokio::test] @@ -614,6 +620,7 @@ pub(crate) mod tests { ) .await .unwrap(); + assert!(compact_task.is_none()); // 3. 
get the latest version and check @@ -629,7 +636,7 @@ pub(crate) mod tests { let compact_task = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap(); @@ -677,11 +684,12 @@ pub(crate) mod tests { 2, Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), ); - - let compact_ctx = get_compactor_context_with_filter_key_extractor_manager_impl( + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( + rpc_filter_key_extractor_manager, + ); + let compact_ctx = get_compactor_context_impl( global_storage.storage_opts().clone(), global_storage.sstable_store(), - rpc_filter_key_extractor_manager, ); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -722,7 +730,9 @@ pub(crate) mod tests { prefix.put_u16(1); prefix.put_slice(random_key.as_slice()); - storage.insert(prefix.freeze(), val.clone(), None).unwrap(); + storage + .insert(TableKey(prefix.freeze()), val.clone(), None) + .unwrap(); storage.flush(Vec::new()).await.unwrap(); storage.seal_current_epoch(next_epoch); other.seal_current_epoch(next_epoch); @@ -767,16 +777,22 @@ pub(crate) mod tests { // 4. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager, ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); @@ -805,7 +821,7 @@ pub(crate) mod tests { let compact_task = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap(); @@ -865,10 +881,7 @@ pub(crate) mod tests { FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), }; - let compact_ctx = get_compactor_context_with_filter_key_extractor_manager( - &storage, - rpc_filter_key_extractor_manager.clone(), - ); + let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -880,7 +893,9 @@ pub(crate) mod tests { 2, Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), ); - + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( + rpc_filter_key_extractor_manager, + ); // 1. add sstables let val = Bytes::from(b"0"[..].to_vec()); // 1 Byte value @@ -906,7 +921,9 @@ pub(crate) mod tests { prefix.put_u16(1); prefix.put_slice(random_key.as_slice()); - local.insert(prefix.freeze(), val.clone(), None).unwrap(); + local + .insert(TableKey(prefix.freeze()), val.clone(), None) + .unwrap(); local.flush(Vec::new()).await.unwrap(); local.seal_current_epoch(next_epoch); @@ -955,16 +972,22 @@ pub(crate) mod tests { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager, ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); @@ -994,7 +1017,7 @@ pub(crate) mod tests { let compact_task = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap(); @@ -1064,10 +1087,10 @@ pub(crate) mod tests { FixedLengthFilterKeyExtractor::new(TABLE_PREFIX_LEN + key_prefix.len()), )), ); - let compact_ctx = get_compactor_context_with_filter_key_extractor_manager( - &storage, + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( rpc_filter_key_extractor_manager, ); + let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -1097,7 +1120,7 @@ pub(crate) mod tests { let ramdom_key = [key_prefix.as_ref(), &rand::thread_rng().gen::<[u8; 32]>()].concat(); local - .insert(Bytes::from(ramdom_key), val.clone(), None) + .insert(TableKey(Bytes::from(ramdom_key)), val.clone(), None) .unwrap(); local.flush(Vec::new()).await.unwrap(); local.seal_current_epoch(next_epoch); @@ -1140,16 +1163,22 @@ pub(crate) mod tests { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager, ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); @@ -1180,7 +1209,7 @@ pub(crate) mod tests { let compact_task = hummock_manager_ref .get_compact_task( StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), + &mut default_compaction_selector(), ) .await .unwrap(); @@ -1196,8 +1225,8 @@ pub(crate) mod tests { key_prefix.to_vec(), ] .concat(); - let start_bound_key = key_prefix; - let end_bound_key = Bytes::from(next_key(start_bound_key.as_ref())); + let start_bound_key = TableKey(key_prefix); + let end_bound_key = TableKey(Bytes::from(next_key(start_bound_key.as_ref()))); let scan_result = storage .scan( ( @@ -1242,7 +1271,8 @@ pub(crate) mod tests { TableId::from(existing_table_id), ) .await; - let compact_ctx = prepare_compactor_and_filter(&storage, existing_table_id); + let (compact_ctx, filter_key_extractor_manager) = + prepare_compactor_and_filter(&storage, existing_table_id); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -1298,16 +1328,22 @@ pub(crate) mod tests { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), + filter_key_extractor_manager, ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); @@ -1321,4 +1357,260 @@ pub(crate) mod tests { assert_eq!(1, output_level_info.table_infos.len()); assert_eq!(252, output_level_info.table_infos[0].total_key_count); } + + type KeyValue = (FullKey>, HummockValue>); + + async fn test_fast_compact_impl( + data1: Vec, + data2: Vec, + data3: Vec, + data4: Vec, + ) { + let (env, hummock_manager_ref, _cluster_manager_ref, worker_node) = + setup_compute_env(8080).await; + let hummock_meta_client: Arc = Arc::new(MockHummockMetaClient::new( + hummock_manager_ref.clone(), + worker_node.id, + )); + let existing_table_id: u32 = 1; + let storage = get_hummock_storage( + hummock_meta_client.clone(), + get_notification_client_for_test(env, hummock_manager_ref.clone(), worker_node.clone()), + &hummock_manager_ref, + TableId::from(existing_table_id), + ) + .await; + hummock_manager_ref.get_new_sst_ids(10).await.unwrap(); + let (compact_ctx, _) = prepare_compactor_and_filter(&storage, existing_table_id); + + let sstable_store = compact_ctx.sstable_store.clone(); + let capacity = 256 * 1024; + let mut options = SstableBuilderOptions { + capacity, + block_capacity: 2048, + restart_interval: 16, + bloom_false_positive: 0.1, + ..Default::default() + }; + let sst1 = gen_test_sstable_info(options.clone(), 1, data1, sstable_store.clone()).await; + let sst2 = gen_test_sstable_info(options.clone(), 2, data2, sstable_store.clone()).await; + options.compression_algorithm = 
CompressionAlgorithm::Lz4; + let sst3 = gen_test_sstable_info(options.clone(), 3, data3, sstable_store.clone()).await; + let sst4 = gen_test_sstable_info(options, 4, data4, sstable_store.clone()).await; + let read_options = Arc::new(SstableIteratorReadOptions::default()); + + let task = CompactTask { + input_ssts: vec![ + InputLevel { + level_idx: 5, + level_type: 1, + table_infos: vec![sst1, sst2], + }, + InputLevel { + level_idx: 6, + level_type: 1, + table_infos: vec![sst3, sst4], + }, + ], + existing_table_ids: vec![1], + task_id: 1, + watermark: 1000, + splits: vec![KeyRange::inf()], + target_level: 6, + base_level: 4, + target_file_size: capacity as u64, + compression_algorithm: 1, + gc_delete_keys: true, + ..Default::default() + }; + let deg = Arc::new(CompactionDeleteRanges::default()); + let multi_filter_key_extractor = + Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)); + let compaction_filter = DummyCompactionFilter {}; + let slow_compact_runner = CompactorRunner::new( + 0, + compact_ctx.clone(), + task.clone(), + Box::new(SharedComapctorObjectIdManager::for_test( + VecDeque::from_iter([5, 6, 7, 8, 9]), + )), + ); + let fast_compact_runner = FastCompactorRunner::new( + compact_ctx.clone(), + task.clone(), + multi_filter_key_extractor.clone(), + Box::new(SharedComapctorObjectIdManager::for_test( + VecDeque::from_iter([10, 11, 12, 13, 14]), + )), + Arc::new(TaskProgress::default()), + ); + let (_, ret1, _) = slow_compact_runner + .run( + compaction_filter, + multi_filter_key_extractor, + deg, + Arc::new(TaskProgress::default()), + ) + .await + .unwrap(); + let ret = ret1.into_iter().map(|sst| sst.sst_info).collect_vec(); + let fast_ret = fast_compact_runner + .run() + .await + .unwrap() + .into_iter() + .map(|sst| sst.sst_info) + .collect_vec(); + println!("ssts: {} vs {}", fast_ret.len(), ret.len()); + let mut fast_tables = Vec::with_capacity(fast_ret.len()); + let mut normal_tables = Vec::with_capacity(ret.len()); + let mut stats = 
StoreLocalStatistic::default(); + for sst_info in &fast_ret { + fast_tables.push( + compact_ctx + .sstable_store + .sstable(sst_info, &mut stats) + .await + .unwrap(), + ); + } + + for sst_info in &ret { + normal_tables.push( + compact_ctx + .sstable_store + .sstable(sst_info, &mut stats) + .await + .unwrap(), + ); + } + println!( + "fast sstables {}.file size={}", + fast_ret[0].object_id, fast_ret[0].file_size, + ); + assert!(can_concat(&ret)); + assert!(can_concat(&fast_ret)); + + let mut normal_iter = UserIterator::for_test( + ConcatIterator::new(ret, compact_ctx.sstable_store.clone(), read_options.clone()), + (Bound::Unbounded, Bound::Unbounded), + ); + let mut fast_iter = UserIterator::for_test( + ConcatIterator::new( + fast_ret, + compact_ctx.sstable_store.clone(), + read_options.clone(), + ), + (Bound::Unbounded, Bound::Unbounded), + ); + + normal_iter.rewind().await.unwrap(); + fast_iter.rewind().await.unwrap(); + let mut count = 0; + while normal_iter.is_valid() { + assert_eq!( + normal_iter.key(), + fast_iter.key(), + "not equal in {}, len: {} {} vs {}", + count, + normal_iter.key().user_key.table_key.as_ref().len(), + u64::from_be_bytes( + normal_iter.key().user_key.table_key.as_ref()[0..8] + .try_into() + .unwrap() + ), + u64::from_be_bytes( + fast_iter.key().user_key.table_key.as_ref()[0..8] + .try_into() + .unwrap() + ), + ); + let hash = Sstable::hash_for_bloom_filter( + fast_iter.key().user_key.encode().as_slice(), + fast_iter.key().user_key.table_id.table_id, + ); + assert_eq!(normal_iter.value(), fast_iter.value()); + let key_ref = fast_iter.key().user_key.as_ref(); + assert!(normal_tables.iter().any(|table| { + table + .value() + .may_match_hash(&(Bound::Included(key_ref), Bound::Included(key_ref)), hash) + })); + assert!(fast_tables.iter().any(|table| { + table + .value() + .may_match_hash(&(Bound::Included(key_ref), Bound::Included(key_ref)), hash) + })); + normal_iter.next().await.unwrap(); + fast_iter.next().await.unwrap(); + count += 1; + } 
+ } + + #[tokio::test] + async fn test_fast_compact() { + const KEY_COUNT: usize = 20000; + let mut last_k: u64 = 0; + let mut rng = rand::rngs::StdRng::seed_from_u64( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + ); + let mut data1 = Vec::with_capacity(KEY_COUNT / 2); + let mut data = Vec::with_capacity(KEY_COUNT); + let mut last_epoch = 400; + for _ in 0..KEY_COUNT { + let rand_v = rng.next_u32() % 100; + let (k, epoch) = if rand_v == 0 { + (last_k + 3000, 400) + } else if rand_v < 5 { + (last_k, last_epoch - 1) + } else { + (last_k + 1, 400) + }; + let key = k.to_be_bytes().to_vec(); + let key = FullKey::new(TableId::new(1), TableKey(key), epoch); + let rand_v = rng.next_u32() % 10; + let v = if rand_v == 1 { + HummockValue::delete() + } else { + HummockValue::put(format!("sst1-{}", epoch).into_bytes()) + }; + if last_k != k && data1.is_empty() && data.len() >= KEY_COUNT / 2 { + std::mem::swap(&mut data, &mut data1); + } + data.push((key, v)); + last_k = k; + last_epoch = epoch; + } + let data2 = data; + let mut data3 = Vec::with_capacity(KEY_COUNT); + let mut data = Vec::with_capacity(KEY_COUNT); + let mut last_k: u64 = 0; + let max_epoch = std::cmp::min(300, last_epoch - 1); + last_epoch = max_epoch; + + for _ in 0..KEY_COUNT * 2 { + let rand_v = rng.next_u32() % 100; + let (k, epoch) = if rand_v == 0 { + (last_k + 1000, max_epoch) + } else if rand_v < 5 { + (last_k, last_epoch - 1) + } else { + (last_k + 1, max_epoch) + }; + let key = k.to_be_bytes().to_vec(); + let key = FullKey::new(TableId::new(1), TableKey(key), epoch); + let v = HummockValue::put(format!("sst2-{}", epoch).into_bytes()); + if last_k != k && data3.is_empty() && data.len() >= KEY_COUNT { + std::mem::swap(&mut data, &mut data3); + } + data.push((key, v)); + last_k = k; + last_epoch = epoch; + } + let data4 = data; + test_fast_compact_impl(data1, data2, data3, data4).await; + } } diff --git a/src/storage/hummock_test/src/failpoint_tests.rs 
b/src/storage/hummock_test/src/failpoint_tests.rs index 83af92d469afe..f71d479c3c55c 100644 --- a/src/storage/hummock_test/src/failpoint_tests.rs +++ b/src/storage/hummock_test/src/failpoint_tests.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use bytes::{BufMut, Bytes}; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::TableId; +use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::TABLE_PREFIX_LEN; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_meta::hummock::test_utils::setup_compute_env; @@ -34,7 +35,7 @@ use risingwave_storage::StateStore; use crate::get_notification_client_for_test; use crate::local_state_store_test_utils::LocalStateStoreTestExt; -use crate::test_utils::TestIngestBatch; +use crate::test_utils::{gen_key_from_str, TestIngestBatch}; #[tokio::test] #[ignore] @@ -62,15 +63,21 @@ async fn test_failpoints_state_store_read_upload() { let mut local = hummock_storage.new_local(NewLocalOptions::default()).await; - let anchor = Bytes::from("aa"); + let anchor = gen_key_from_str(VirtualNode::ZERO, "aa"); let mut batch1 = vec![ (anchor.clone(), StorageValue::new_put("111")), - (Bytes::from("cc"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("222"), + ), ]; batch1.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); let mut batch2 = vec![ - (Bytes::from("cc"), StorageValue::new_put("333")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("333"), + ), (anchor.clone(), StorageValue::new_delete()), ]; // Make sure the batch is sorted. 
@@ -162,7 +169,10 @@ async fn test_failpoints_state_store_read_upload() { assert!(result.is_err()); let result = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), 2, ReadOptions { table_id: Default::default(), @@ -181,7 +191,7 @@ async fn test_failpoints_state_store_read_upload() { }; let value = hummock_storage .get( - Bytes::from("ee"), + gen_key_from_str(VirtualNode::ZERO, "ee"), 2, ReadOptions { prefix_hint: Some(Bytes::from(bee_prefix_hint)), @@ -233,7 +243,10 @@ async fn test_failpoints_state_store_read_upload() { assert_eq!(value, Bytes::from("111")); let iters = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), 5, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), diff --git a/src/storage/hummock_test/src/hummock_storage_tests.rs b/src/storage/hummock_test/src/hummock_storage_tests.rs index 4e3d6e1aed919..ce91c109a814b 100644 --- a/src/storage/hummock_test/src/hummock_storage_tests.rs +++ b/src/storage/hummock_test/src/hummock_storage_tests.rs @@ -20,7 +20,7 @@ use parking_lot::RwLock; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::TableId; use risingwave_common::hash::VirtualNode; -use risingwave_hummock_sdk::key::{map_table_key_range, FullKey, UserKey, TABLE_PREFIX_LEN}; +use risingwave_hummock_sdk::key::{FullKey, TableKey, TABLE_PREFIX_LEN}; use risingwave_rpc_client::HummockMetaClient; use risingwave_storage::hummock::store::version::{read_filter_for_batch, read_filter_for_local}; use risingwave_storage::hummock::CachePolicy; @@ -29,7 +29,7 @@ use risingwave_storage::store::*; use risingwave_storage::StateStore; use crate::local_state_store_test_utils::LocalStateStoreTestExt; -use crate::test_utils::{prepare_hummock_test_env, TestIngestBatch}; +use 
crate::test_utils::{gen_key_from_str, prepare_hummock_test_env, TestIngestBatch}; #[tokio::test] async fn test_storage_basic() { @@ -40,15 +40,14 @@ async fn test_storage_basic() { .storage .new_local(NewLocalOptions::for_test(TEST_TABLE_ID)) .await; - // First batch inserts the anchor and others. let mut batch1 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), StorageValue::new_put("111"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), StorageValue::new_put("222"), ), ]; @@ -59,11 +58,11 @@ async fn test_storage_basic() { // Second batch modifies the anchor. let mut batch2 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc"].concat()), + gen_key_from_str(VirtualNode::ZERO, "cc"), StorageValue::new_put("333"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), StorageValue::new_put("111111"), ), ]; @@ -74,15 +73,15 @@ async fn test_storage_basic() { // Third batch deletes the anchor let mut batch3 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dd"].concat()), + gen_key_from_str(VirtualNode::ZERO, "dd"), StorageValue::new_put("444"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"ee"].concat()), + gen_key_from_str(VirtualNode::ZERO, "ee"), StorageValue::new_put("555"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), StorageValue::new_delete(), ), ]; @@ -111,7 +110,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch1, ReadOptions { table_id: TEST_TABLE_ID, @@ -127,7 +126,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - 
Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1, ReadOptions { table_id: TEST_TABLE_ID, @@ -144,7 +143,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"ab"].concat()), + gen_key_from_str(VirtualNode::ZERO, "ab"), epoch1, ReadOptions { table_id: TEST_TABLE_ID, @@ -174,7 +173,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch2, ReadOptions { table_id: TEST_TABLE_ID, @@ -207,7 +206,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch3, ReadOptions { table_id: TEST_TABLE_ID, @@ -223,7 +222,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"ff"].concat()), + gen_key_from_str(VirtualNode::ZERO, "ff"), epoch3, ReadOptions { table_id: TEST_TABLE_ID, @@ -242,9 +241,7 @@ async fn test_storage_basic() { .iter( ( Unbounded, - Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"ee"].concat(), - )), + Included(gen_key_from_str(VirtualNode::ZERO, "ee")), ), epoch1, ReadOptions { @@ -259,11 +256,9 @@ async fn test_storage_basic() { futures::pin_mut!(iter); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch1 ), Bytes::copy_from_slice(&b"111"[..]) @@ -272,11 +267,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb".as_slice()].concat() - ), + 
gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1 ), Bytes::copy_from_slice(&b"222"[..]) @@ -289,7 +282,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch1, ReadOptions { table_id: TEST_TABLE_ID, @@ -307,7 +300,7 @@ async fn test_storage_basic() { let value = test_env .storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch2, ReadOptions { table_id: TEST_TABLE_ID, @@ -326,9 +319,7 @@ async fn test_storage_basic() { .iter( ( Unbounded, - Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"ee"].concat(), - )), + Included(gen_key_from_str(VirtualNode::ZERO, "ee")), ), epoch2, ReadOptions { @@ -342,11 +333,9 @@ async fn test_storage_basic() { futures::pin_mut!(iter); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch2 ), Bytes::copy_from_slice(&b"111111"[..]) @@ -355,11 +344,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1 ), Bytes::copy_from_slice(&b"222"[..]) @@ -368,11 +355,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch2 ), Bytes::copy_from_slice(&b"333"[..]) @@ -387,9 +372,7 @@ async fn test_storage_basic() { .iter( ( Unbounded, - Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"ee"].concat(), - )), + 
Included(gen_key_from_str(VirtualNode::ZERO, "ee")), ), epoch3, ReadOptions { @@ -403,11 +386,9 @@ async fn test_storage_basic() { futures::pin_mut!(iter); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1 ), Bytes::copy_from_slice(&b"222"[..]) @@ -416,11 +397,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch2 ), Bytes::copy_from_slice(&b"333"[..]) @@ -429,11 +408,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"dd".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "dd"), epoch3 ), Bytes::copy_from_slice(&b"444"[..]) @@ -442,11 +419,9 @@ async fn test_storage_basic() { ); assert_eq!( Some(( - FullKey::for_test( + FullKey::new( TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"ee".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "ee"), epoch3 ), Bytes::copy_from_slice(&b"555"[..]) @@ -476,11 +451,11 @@ async fn test_state_store_sync() { // ingest 16B batch let mut batch1 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aaaa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aaaa"), StorageValue::new_put("1111"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bbbb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bbbb"), StorageValue::new_put("2222"), ), ]; @@ -501,15 +476,15 @@ async fn test_state_store_sync() { // ingest 24B batch let mut batch2 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cccc"].concat()), + gen_key_from_str(VirtualNode::ZERO, 
"cccc"), StorageValue::new_put("3333"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dddd"].concat()), + gen_key_from_str(VirtualNode::ZERO, "dddd"), StorageValue::new_put("4444"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat()), + gen_key_from_str(VirtualNode::ZERO, "eeee"), StorageValue::new_put("5555"), ), ]; @@ -531,7 +506,7 @@ async fn test_state_store_sync() { // ingest more 8B then will trigger a sync behind the scene let mut batch3 = vec![( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat()), + gen_key_from_str(VirtualNode::ZERO, "eeee"), StorageValue::new_put("6666"), )]; batch3.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); @@ -563,26 +538,11 @@ async fn test_state_store_sync() { { let kv_map = [ - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aaaa"].concat()), - "1111", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bbbb"].concat()), - "2222", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cccc"].concat()), - "3333", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dddd"].concat()), - "4444", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat()), - "5555", - ), + (gen_key_from_str(VirtualNode::ZERO, "aaaa"), "1111"), + (gen_key_from_str(VirtualNode::ZERO, "bbbb"), "2222"), + (gen_key_from_str(VirtualNode::ZERO, "cccc"), "3333"), + (gen_key_from_str(VirtualNode::ZERO, "dddd"), "4444"), + (gen_key_from_str(VirtualNode::ZERO, "eeee"), "5555"), ]; for (k, v) in kv_map { @@ -620,26 +580,11 @@ async fn test_state_store_sync() { { let kv_map = [ - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aaaa"].concat()), - "1111", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bbbb"].concat()), - "2222", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cccc"].concat()), - "3333", - ), - ( - 
Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dddd"].concat()), - "4444", - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat()), - "6666", - ), + (gen_key_from_str(VirtualNode::ZERO, "aaaa"), "1111"), + (gen_key_from_str(VirtualNode::ZERO, "bbbb"), "2222"), + (gen_key_from_str(VirtualNode::ZERO, "cccc"), "3333"), + (gen_key_from_str(VirtualNode::ZERO, "dddd"), "4444"), + (gen_key_from_str(VirtualNode::ZERO, "eeee"), "6666"), ]; for (k, v) in kv_map { @@ -668,9 +613,7 @@ async fn test_state_store_sync() { .iter( ( Unbounded, - Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat(), - )), + Included(gen_key_from_str(VirtualNode::ZERO, "eeee")), ), epoch1, ReadOptions { @@ -684,38 +627,18 @@ async fn test_state_store_sync() { futures::pin_mut!(iter); let kv_map = [ - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aaaa"].concat()), - "1111", - epoch1, - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bbbb"].concat()), - "2222", - epoch1, - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cccc"].concat()), - "3333", - epoch1, - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dddd"].concat()), - "4444", - epoch1, - ), - ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat()), - "5555", - epoch1, - ), + (gen_key_from_str(VirtualNode::ZERO, "aaaa"), "1111", epoch1), + (gen_key_from_str(VirtualNode::ZERO, "bbbb"), "2222", epoch1), + (gen_key_from_str(VirtualNode::ZERO, "cccc"), "3333", epoch1), + (gen_key_from_str(VirtualNode::ZERO, "dddd"), "4444", epoch1), + (gen_key_from_str(VirtualNode::ZERO, "eeee"), "5555", epoch1), ]; for (k, v, e) in kv_map { let result = iter.try_next().await.unwrap(); assert_eq!( result, - Some((FullKey::for_test(TEST_TABLE_ID, k, e), Bytes::from(v))) + Some((FullKey::new(TEST_TABLE_ID, k, e), Bytes::from(v))) ); } @@ -728,9 +651,7 @@ async fn test_state_store_sync() { 
.iter( ( Unbounded, - Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"eeee"].concat(), - )), + Included(gen_key_from_str(VirtualNode::ZERO, "eeee")), ), epoch2, ReadOptions { @@ -745,11 +666,11 @@ async fn test_state_store_sync() { futures::pin_mut!(iter); let kv_map = [ - (b"aaaa", "1111", epoch1), - (b"bbbb", "2222", epoch1), - (b"cccc", "3333", epoch1), - (b"dddd", "4444", epoch1), - (b"eeee", "6666", epoch2), + ("aaaa", "1111", epoch1), + ("bbbb", "2222", epoch1), + ("cccc", "3333", epoch1), + ("dddd", "4444", epoch1), + ("eeee", "6666", epoch2), ]; for (k, v, e) in kv_map { @@ -757,13 +678,7 @@ async fn test_state_store_sync() { assert_eq!( result, Some(( - FullKey::for_test( - TEST_TABLE_ID, - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), k.as_slice()].concat() - ), - e - ), + FullKey::new(TEST_TABLE_ID, gen_key_from_str(VirtualNode::ZERO, k), e), Bytes::from(v) )) ); @@ -791,8 +706,14 @@ async fn test_delete_get() { hummock_storage.init_for_test(epoch1).await.unwrap(); let batch1 = vec![ - (Bytes::from("aa"), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("111"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; hummock_storage .ingest_batch( @@ -814,7 +735,10 @@ async fn test_delete_get() { .unwrap(); let epoch2 = initial_epoch + 2; hummock_storage.seal_current_epoch(epoch2); - let batch2 = vec![(Bytes::from("bb"), StorageValue::new_delete())]; + let batch2 = vec![( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_delete(), + )]; hummock_storage .ingest_batch( batch2, @@ -836,7 +760,7 @@ async fn test_delete_get() { assert!(test_env .storage .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch2, ReadOptions { prefix_hint: None, @@ -868,8 +792,14 @@ async fn test_multiple_epoch_sync() { let epoch1 = initial_epoch + 1; 
hummock_storage.init_for_test(epoch1).await.unwrap(); let batch1 = vec![ - (Bytes::from("aa"), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("111"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; hummock_storage .ingest_batch( @@ -885,7 +815,10 @@ let epoch2 = initial_epoch + 2; hummock_storage.seal_current_epoch(epoch2); - let batch2 = vec![(Bytes::from("bb"), StorageValue::new_delete())]; + let batch2 = vec![( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_delete(), + )]; hummock_storage .ingest_batch( batch2, @@ -901,8 +834,14 @@ let epoch3 = initial_epoch + 3; hummock_storage.seal_current_epoch(epoch3); let batch3 = vec![ - (Bytes::from("aa"), StorageValue::new_put("444")), - (Bytes::from("bb"), StorageValue::new_put("555")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("444"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("555"), + ), ]; hummock_storage .ingest_batch( @@ -921,7 +860,7 @@ assert_eq!( hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1, ReadOptions { table_id: TEST_TABLE_ID, @@ -936,7 +875,7 @@ ); assert!(hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch2, ReadOptions { table_id: TEST_TABLE_ID, @@ -951,7 +890,7 @@ assert_eq!( hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch3, ReadOptions { table_id: TEST_TABLE_ID, @@ -1000,18 +939,15 @@ async fn test_iter_with_min_epoch() { let epoch1 = (31 * 1000) << 16; - let gen_key = |index: usize| -> String { format!("\0\0key_{}", index) }; + 
let gen_key = |index: usize| -> TableKey { + gen_key_from_str(VirtualNode::ZERO, format!("\0\0key_{}", index).as_str()) + }; let gen_val = |index: usize| -> String { format!("val_{}", index) }; // epoch 1 write - let batch_epoch1: Vec<(Bytes, StorageValue)> = (0..10) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch1: Vec<(TableKey, StorageValue)> = (0..10) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); hummock_storage.init_for_test(epoch1).await.unwrap(); @@ -1031,13 +967,8 @@ async fn test_iter_with_min_epoch() { let epoch2 = (32 * 1000) << 16; hummock_storage.seal_current_epoch(epoch2); // epoch 2 write - let batch_epoch2: Vec<(Bytes, StorageValue)> = (20..30) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch2: Vec<(TableKey, StorageValue)> = (20..30) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); hummock_storage @@ -1219,40 +1150,27 @@ async fn test_hummock_version_reader() { let epoch1 = (31 * 1000) << 16; - let gen_key = |index: usize| -> String { format!("\0\0key_{}", index) }; + let gen_key = |index: usize| -> TableKey { + gen_key_from_str(VirtualNode::ZERO, format!("\0\0key_{}", index).as_str()) + }; let gen_val = |index: usize| -> String { format!("val_{}", index) }; // epoch 1 write - let batch_epoch1: Vec<(Bytes, StorageValue)> = (0..10) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch1: Vec<(TableKey, StorageValue)> = (0..10) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); let epoch2 = (32 * 1000) << 16; // epoch 2 write - let batch_epoch2: Vec<(Bytes, StorageValue)> = (20..30) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch2: Vec<(TableKey, StorageValue)> = (20..30) 
+ .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); let epoch3 = (33 * 1000) << 16; // epoch 3 write - let batch_epoch3: Vec<(Bytes, StorageValue)> = (40..50) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch3: Vec<(TableKey, StorageValue)> = (40..50) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); { hummock_storage.init_for_test(epoch1).await.unwrap(); @@ -1565,10 +1483,10 @@ async fn test_hummock_version_reader() { } { - let start_key = Bytes::from(gen_key(25)); - let end_key = Bytes::from(gen_key(50)); + let start_key = gen_key(25); + let end_key = gen_key(50); - let key_range = map_table_key_range((Included(start_key), Excluded(end_key))); + let key_range = (Included(start_key), Excluded(end_key)); { let read_snapshot = { @@ -1659,20 +1577,15 @@ async fn test_get_with_min_epoch() { let epoch1 = (31 * 1000) << 16; hummock_storage.init_for_test(epoch1).await.unwrap(); - let gen_key = |index: usize| -> Vec { - UserKey::for_test(TEST_TABLE_ID, format!("key_{}", index)).encode() + let gen_key = |index: usize| -> TableKey { + gen_key_from_str(VirtualNode::ZERO, format!("key_{}", index).as_str()) }; let gen_val = |index: usize| -> String { format!("val_{}", index) }; // epoch 1 write - let batch_epoch1: Vec<(Bytes, StorageValue)> = (0..10) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch1: Vec<(TableKey, StorageValue)> = (0..10) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); hummock_storage @@ -1690,13 +1603,8 @@ async fn test_get_with_min_epoch() { let epoch2 = (32 * 1000) << 16; hummock_storage.seal_current_epoch(epoch2); // epoch 2 write - let batch_epoch2: Vec<(Bytes, StorageValue)> = (20..30) - .map(|index| { - ( - Bytes::from(gen_key(index)), - StorageValue::new_put(gen_val(index)), - ) - }) + let batch_epoch2: 
Vec<(TableKey, StorageValue)> = (20..30) + .map(|index| (gen_key(index), StorageValue::new_put(gen_val(index)))) .collect(); hummock_storage @@ -1713,7 +1621,7 @@ async fn test_get_with_min_epoch() { { // test before sync - let k = Bytes::from(gen_key(0)); + let k = gen_key(0); let prefix_hint = { let mut ret = Vec::with_capacity(TABLE_PREFIX_LEN + k.len()); ret.put_u32(TEST_TABLE_ID.table_id()); @@ -1811,7 +1719,7 @@ async fn test_get_with_min_epoch() { .unwrap(); test_env.storage.try_wait_epoch_for_test(epoch2).await; - let k = Bytes::from(gen_key(0)); + let k = gen_key(0); let prefix_hint = { let mut ret = Vec::with_capacity(TABLE_PREFIX_LEN + k.len()); ret.put_u32(TEST_TABLE_ID.table_id()); @@ -1857,7 +1765,7 @@ async fn test_get_with_min_epoch() { } { - let k = Bytes::from(gen_key(0)); + let k = gen_key(0); let v = test_env .storage .get( @@ -1876,7 +1784,7 @@ async fn test_get_with_min_epoch() { } { - let k = Bytes::from(gen_key(0)); + let k = gen_key(0); let v = test_env .storage .get( diff --git a/src/storage/hummock_test/src/lib.rs b/src/storage/hummock_test/src/lib.rs index 73e1d8cd0eaad..593771435f1e0 100644 --- a/src/storage/hummock_test/src/lib.rs +++ b/src/storage/hummock_test/src/lib.rs @@ -17,7 +17,6 @@ #![feature(bound_map)] #![feature(type_alias_impl_trait)] #![feature(associated_type_bounds)] -#![feature(return_position_impl_trait_in_trait)] #[cfg(test)] mod compactor_tests; diff --git a/src/storage/hummock_test/src/snapshot_tests.rs b/src/storage/hummock_test/src/snapshot_tests.rs index 3870dae070903..ebc8b52358b8e 100644 --- a/src/storage/hummock_test/src/snapshot_tests.rs +++ b/src/storage/hummock_test/src/snapshot_tests.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use bytes::Bytes; use futures::TryStreamExt; use risingwave_common::cache::CachePriority; +use risingwave_hummock_sdk::key::{map_table_key_range, TableKey}; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_meta::hummock::MockHummockMetaClient; use 
risingwave_rpc_client::HummockMetaClient; @@ -39,7 +40,7 @@ macro_rules! assert_count_range_scan { ); let it = $storage .iter( - bounds, + map_table_key_range(bounds), $epoch, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), @@ -110,8 +111,8 @@ async fn test_snapshot_inner( local .ingest_batch( vec![ - (Bytes::from("1"), StorageValue::new_put("test")), - (Bytes::from("2"), StorageValue::new_put("test")), + (TableKey(Bytes::from("1")), StorageValue::new_put("test")), + (TableKey(Bytes::from("2")), StorageValue::new_put("test")), ], vec![], WriteOptions { @@ -145,9 +146,9 @@ async fn test_snapshot_inner( local .ingest_batch( vec![ - (Bytes::from("1"), StorageValue::new_delete()), - (Bytes::from("3"), StorageValue::new_put("test")), - (Bytes::from("4"), StorageValue::new_put("test")), + (TableKey(Bytes::from("1")), StorageValue::new_delete()), + (TableKey(Bytes::from("3")), StorageValue::new_put("test")), + (TableKey(Bytes::from("4")), StorageValue::new_put("test")), ], vec![], WriteOptions { @@ -182,9 +183,9 @@ async fn test_snapshot_inner( local .ingest_batch( vec![ - (Bytes::from("2"), StorageValue::new_delete()), - (Bytes::from("3"), StorageValue::new_delete()), - (Bytes::from("4"), StorageValue::new_delete()), + (TableKey(Bytes::from("2")), StorageValue::new_delete()), + (TableKey(Bytes::from("3")), StorageValue::new_delete()), + (TableKey(Bytes::from("4")), StorageValue::new_delete()), ], vec![], WriteOptions { @@ -232,10 +233,10 @@ async fn test_snapshot_range_scan_inner( local .ingest_batch( vec![ - (Bytes::from("1"), StorageValue::new_put("test")), - (Bytes::from("2"), StorageValue::new_put("test")), - (Bytes::from("3"), StorageValue::new_put("test")), - (Bytes::from("4"), StorageValue::new_put("test")), + (TableKey(Bytes::from("1")), StorageValue::new_put("test")), + (TableKey(Bytes::from("2")), StorageValue::new_put("test")), + (TableKey(Bytes::from("3")), StorageValue::new_put("test")), + (TableKey(Bytes::from("4")), 
StorageValue::new_put("test")), ], vec![], WriteOptions { diff --git a/src/storage/hummock_test/src/state_store_tests.rs b/src/storage/hummock_test/src/state_store_tests.rs index bc68da1f9d298..3b9752c6c6fe4 100644 --- a/src/storage/hummock_test/src/state_store_tests.rs +++ b/src/storage/hummock_test/src/state_store_tests.rs @@ -38,14 +38,16 @@ use risingwave_storage::store::*; use crate::get_notification_client_for_test; use crate::local_state_store_test_utils::LocalStateStoreTestExt; -use crate::test_utils::{with_hummock_storage_v2, HummockStateStoreTestTrait, TestIngestBatch}; +use crate::test_utils::{ + gen_key_from_str, with_hummock_storage_v2, HummockStateStoreTestTrait, TestIngestBatch, +}; #[tokio::test] async fn test_empty_read_v2() { let (hummock_storage, _meta_client) = with_hummock_storage_v2(Default::default()).await; assert!(hummock_storage .get( - Bytes::from("test_key"), + gen_key_from_str(VirtualNode::ZERO, "test_key"), u64::MAX, ReadOptions { table_id: TableId { table_id: 2333 }, @@ -82,12 +84,15 @@ async fn test_basic_inner( hummock_storage: impl HummockStateStoreTestTrait, meta_client: Arc, ) { - let anchor = Bytes::from("aa"); + let anchor = gen_key_from_str(VirtualNode::ZERO, "aa"); // First batch inserts the anchor and others. let mut batch1 = vec![ (anchor.clone(), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; // Make sure the batch is sorted. @@ -95,7 +100,10 @@ async fn test_basic_inner( // Second batch modifies the anchor. 
let mut batch2 = vec![ - (Bytes::from("cc"), StorageValue::new_put("333")), + ( + gen_key_from_str(VirtualNode::ZERO, "cc"), + StorageValue::new_put("333"), + ), (anchor.clone(), StorageValue::new_put("111111")), ]; @@ -104,8 +112,14 @@ async fn test_basic_inner( // Third batch deletes the anchor let mut batch3 = vec![ - (Bytes::from("dd"), StorageValue::new_put("444")), - (Bytes::from("ee"), StorageValue::new_put("555")), + ( + gen_key_from_str(VirtualNode::ZERO, "dd"), + StorageValue::new_put("444"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "ee"), + StorageValue::new_put("555"), + ), (anchor.clone(), StorageValue::new_delete()), ]; @@ -165,7 +179,7 @@ async fn test_basic_inner( assert_eq!(value, Bytes::from("111")); let value = hummock_storage .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -180,7 +194,7 @@ async fn test_basic_inner( // Test looking for a nonexistent key. `next()` would return the next key. let value = hummock_storage .get( - Bytes::from("ab"), + gen_key_from_str(VirtualNode::ZERO, "ab"), epoch1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -255,7 +269,7 @@ async fn test_basic_inner( // Get non-existent maximum key. 
let value = hummock_storage .get( - Bytes::from("ff"), + gen_key_from_str(VirtualNode::ZERO, "ff"), epoch3, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -269,7 +283,10 @@ async fn test_basic_inner( // Write aa bb let iter = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), epoch1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -313,7 +330,10 @@ async fn test_basic_inner( // Update aa, write cc let iter = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), epoch2, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), @@ -329,7 +349,10 @@ async fn test_basic_inner( // Delete aa, write dd,ee let iter = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), epoch3, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), @@ -353,7 +376,7 @@ async fn test_basic_inner( .unwrap(); let value = hummock_storage .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch2, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -366,7 +389,7 @@ async fn test_basic_inner( assert_eq!(value, Bytes::from("222")); let value = hummock_storage .get( - Bytes::from("dd"), + gen_key_from_str(VirtualNode::ZERO, "dd"), epoch2, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -392,8 +415,14 @@ async fn test_state_store_sync_inner( // ingest 16B batch let mut batch1 = vec![ - (Bytes::from("\0\0aaaa"), StorageValue::new_put("1111")), - (Bytes::from("\0\0bbbb"), StorageValue::new_put("2222")), + ( + gen_key_from_str(VirtualNode::ZERO, "\0\0aaaa"), + StorageValue::new_put("1111"), + ), + ( + 
gen_key_from_str(VirtualNode::ZERO, "\0\0bbbb"), + StorageValue::new_put("2222"), + ), ]; // Make sure the batch is sorted. @@ -418,15 +447,15 @@ async fn test_state_store_sync_inner( // ingest 24B batch let mut batch2 = vec![ ( - Bytes::copy_from_slice(b"\0\0cccc"), + gen_key_from_str(VirtualNode::ZERO, "\0\0cccc"), StorageValue::new_put("3333"), ), ( - Bytes::copy_from_slice(b"\0\0dddd"), + gen_key_from_str(VirtualNode::ZERO, "\0\0dddd"), StorageValue::new_put("4444"), ), ( - Bytes::copy_from_slice(b"\0\0eeee"), + gen_key_from_str(VirtualNode::ZERO, "\0\0eeee"), StorageValue::new_put("5555"), ), ]; @@ -457,7 +486,7 @@ async fn test_state_store_sync_inner( // ingest more 8B then will trigger a sync behind the scene let mut batch3 = vec![( - Bytes::copy_from_slice(b"\0\0eeee"), + gen_key_from_str(VirtualNode::ZERO, "\0\0eeee"), StorageValue::new_put("5555"), )]; batch3.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); @@ -504,12 +533,15 @@ async fn test_reload_storage() { let (env, hummock_manager_ref, _cluster_manager_ref, worker_node) = setup_compute_env(8080).await; let (hummock_storage, meta_client) = with_hummock_storage_v2(Default::default()).await; - let anchor = Bytes::from("aa"); + let anchor = gen_key_from_str(VirtualNode::ZERO, "aa"); // First batch inserts the anchor and others. let mut batch1 = vec![ (anchor.clone(), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; // Make sure the batch is sorted. @@ -517,7 +549,10 @@ async fn test_reload_storage() { // Second batch modifies the anchor. let mut batch2 = vec![ - (Bytes::from("cc"), StorageValue::new_put("333")), + ( + gen_key_from_str(VirtualNode::ZERO, "cc"), + StorageValue::new_put("333"), + ), (anchor.clone(), StorageValue::new_put("111111")), ]; @@ -570,7 +605,7 @@ async fn test_reload_storage() { // Test looking for a nonexistent key. `next()` would return the next key. 
let value = hummock_storage .get( - Bytes::from("ab"), + gen_key_from_str(VirtualNode::ZERO, "ab"), epoch1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -614,7 +649,10 @@ async fn test_reload_storage() { // Write aa bb let iter = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), epoch1, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), @@ -659,7 +697,10 @@ async fn test_reload_storage() { // Update aa, write cc let iter = hummock_storage .iter( - (Bound::Unbounded, Bound::Included(Bytes::from("ee"))), + ( + Bound::Unbounded, + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "ee")), + ), epoch2, ReadOptions { prefetch_options: PrefetchOptions::new_for_exhaust_iter(), @@ -695,7 +736,7 @@ async fn test_write_anytime_inner( "111".as_bytes(), hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -710,7 +751,7 @@ async fn test_write_anytime_inner( "222".as_bytes(), hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -725,7 +766,7 @@ async fn test_write_anytime_inner( "333".as_bytes(), hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc"].concat()), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -740,12 +781,8 @@ async fn test_write_anytime_inner( let iter = hummock_storage .iter( ( - Bound::Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat(), - )), - Bound::Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), 
b"cc"].concat(), - )), + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "aa")), + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "cc")), ), epoch, ReadOptions { @@ -758,11 +795,9 @@ async fn test_write_anytime_inner( futures::pin_mut!(iter); assert_eq!( ( - FullKey::for_test( + FullKey::new( TableId::default(), - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch ), Bytes::from("111") @@ -771,11 +806,9 @@ async fn test_write_anytime_inner( ); assert_eq!( ( - FullKey::for_test( + FullKey::new( TableId::default(), - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch ), Bytes::from("222") @@ -784,11 +817,9 @@ async fn test_write_anytime_inner( ); assert_eq!( ( - FullKey::for_test( + FullKey::new( TableId::default(), - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch ), Bytes::from("333") @@ -801,15 +832,15 @@ async fn test_write_anytime_inner( let batch1 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), StorageValue::new_put("111"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), StorageValue::new_put("222"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc"].concat()), + gen_key_from_str(VirtualNode::ZERO, "cc"), StorageValue::new_put("333"), ), ]; @@ -838,7 +869,7 @@ async fn test_write_anytime_inner( "111_new".as_bytes(), hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -852,7 +883,7 @@ async fn test_write_anytime_inner( 
assert!(hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -866,7 +897,7 @@ async fn test_write_anytime_inner( "333".as_bytes(), hummock_storage .get( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc"].concat()), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -880,12 +911,8 @@ async fn test_write_anytime_inner( let iter = hummock_storage .iter( ( - Bound::Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat(), - )), - Bound::Included(Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc"].concat(), - )), + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "aa")), + Bound::Included(gen_key_from_str(VirtualNode::ZERO, "cc")), ), epoch, ReadOptions { @@ -898,11 +925,9 @@ async fn test_write_anytime_inner( futures::pin_mut!(iter); assert_eq!( ( - FullKey::for_test( + FullKey::new( TableId::default(), - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "aa"), epoch ), Bytes::from("111_new") @@ -911,11 +936,9 @@ async fn test_write_anytime_inner( ); assert_eq!( ( - FullKey::for_test( + FullKey::new( TableId::default(), - Bytes::from( - [VirtualNode::ZERO.to_be_bytes().as_slice(), b"cc".as_slice()].concat() - ), + gen_key_from_str(VirtualNode::ZERO, "cc"), epoch ), Bytes::from("333") @@ -929,11 +952,11 @@ async fn test_write_anytime_inner( // Update aa, delete bb, cc unchanged let batch2 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aa"), StorageValue::new_put("111_new"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bb"), 
StorageValue::new_delete(), ), ]; @@ -1006,8 +1029,14 @@ async fn test_delete_get_inner( let initial_epoch = hummock_storage.get_pinned_version().max_committed_epoch(); let epoch1 = initial_epoch + 1; let batch1 = vec![ - (Bytes::from("aa"), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("111"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; let mut local = hummock_storage.new_local(NewLocalOptions::default()).await; local.init_for_test(epoch1).await.unwrap(); @@ -1031,7 +1060,10 @@ async fn test_delete_get_inner( let epoch2 = initial_epoch + 2; local.seal_current_epoch(epoch2); - let batch2 = vec![(Bytes::from("bb"), StorageValue::new_delete())]; + let batch2 = vec![( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_delete(), + )]; local .ingest_batch( batch2, @@ -1056,7 +1088,7 @@ async fn test_delete_get_inner( .unwrap(); assert!(hummock_storage .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch2, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -1081,8 +1113,14 @@ async fn test_multiple_epoch_sync_inner( let initial_epoch = hummock_storage.get_pinned_version().max_committed_epoch(); let epoch1 = initial_epoch + 1; let batch1 = vec![ - (Bytes::from("aa"), StorageValue::new_put("111")), - (Bytes::from("bb"), StorageValue::new_put("222")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("111"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("222"), + ), ]; let mut local = hummock_storage.new_local(NewLocalOptions::default()).await; @@ -1101,7 +1139,10 @@ async fn test_multiple_epoch_sync_inner( let epoch2 = initial_epoch + 2; local.seal_current_epoch(epoch2); - let batch2 = vec![(Bytes::from("bb"), StorageValue::new_delete())]; + let batch2 = vec![( + gen_key_from_str(VirtualNode::ZERO, 
"bb"), + StorageValue::new_delete(), + )]; local .ingest_batch( batch2, @@ -1116,8 +1157,14 @@ async fn test_multiple_epoch_sync_inner( let epoch3 = initial_epoch + 3; let batch3 = vec![ - (Bytes::from("aa"), StorageValue::new_put("444")), - (Bytes::from("bb"), StorageValue::new_put("555")), + ( + gen_key_from_str(VirtualNode::ZERO, "aa"), + StorageValue::new_put("444"), + ), + ( + gen_key_from_str(VirtualNode::ZERO, "bb"), + StorageValue::new_put("555"), + ), ]; local.seal_current_epoch(epoch3); local @@ -1138,7 +1185,7 @@ async fn test_multiple_epoch_sync_inner( assert_eq!( hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch1, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -1152,7 +1199,7 @@ async fn test_multiple_epoch_sync_inner( ); assert!(hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch2, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -1165,7 +1212,7 @@ async fn test_multiple_epoch_sync_inner( assert_eq!( hummock_storage_clone .get( - Bytes::from("bb"), + gen_key_from_str(VirtualNode::ZERO, "bb"), epoch3, ReadOptions { cache_policy: CachePolicy::Fill(CachePriority::High), @@ -1220,10 +1267,18 @@ async fn test_gc_watermark_and_clear_shared_buffer() { let epoch1 = initial_epoch + 1; local_hummock_storage.init_for_test(epoch1).await.unwrap(); local_hummock_storage - .insert(Bytes::from("aa"), Bytes::from("111"), None) + .insert( + gen_key_from_str(VirtualNode::ZERO, "aa"), + Bytes::from("111"), + None, + ) .unwrap(); local_hummock_storage - .insert(Bytes::from("bb"), Bytes::from("222"), None) + .insert( + gen_key_from_str(VirtualNode::ZERO, "bb"), + Bytes::from("222"), + None, + ) .unwrap(); local_hummock_storage.flush(Vec::new()).await.unwrap(); @@ -1237,7 +1292,10 @@ async fn test_gc_watermark_and_clear_shared_buffer() { let epoch2 = initial_epoch + 2; local_hummock_storage.seal_current_epoch(epoch2); 
local_hummock_storage - .delete(Bytes::from("bb"), Bytes::from("222")) + .delete( + gen_key_from_str(VirtualNode::ZERO, "bb"), + Bytes::from("222"), + ) .unwrap(); local_hummock_storage.flush(Vec::new()).await.unwrap(); @@ -1343,11 +1401,11 @@ async fn test_replicated_local_hummock_storage() { // ingest 16B batch let mut batch1 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"aaaa"].concat()), + gen_key_from_str(VirtualNode::ZERO, "aaaa"), StorageValue::new_put("1111"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"bbbb"].concat()), + gen_key_from_str(VirtualNode::ZERO, "bbbb"), StorageValue::new_put("2222"), ), ]; @@ -1414,11 +1472,11 @@ async fn test_replicated_local_hummock_storage() { // ingest 16B batch let mut batch2 = vec![ ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"cccc"].concat()), + gen_key_from_str(VirtualNode::ZERO, "cccc"), StorageValue::new_put("3333"), ), ( - Bytes::from([VirtualNode::ZERO.to_be_bytes().as_slice(), b"dddd"].concat()), + gen_key_from_str(VirtualNode::ZERO, "dddd"), StorageValue::new_put("4444"), ), ]; diff --git a/src/storage/hummock_test/src/sync_point_tests.rs b/src/storage/hummock_test/src/sync_point_tests.rs index 7194dd2d963ea..1b7d6d80029a5 100644 --- a/src/storage/hummock_test/src/sync_point_tests.rs +++ b/src/storage/hummock_test/src/sync_point_tests.rs @@ -21,33 +21,30 @@ use bytes::Bytes; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::hummock::CompactionFilterFlag; use risingwave_common::catalog::TableId; +use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::HummockVersionExt; use risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; use risingwave_hummock_sdk::key::{next_key, user_key}; use risingwave_hummock_sdk::table_stats::to_prost_table_stats_map; use risingwave_hummock_sdk::HummockVersionId; use 
risingwave_meta::hummock::compaction::compaction_config::CompactionConfigBuilder; -use risingwave_meta::hummock::compaction::{default_level_selector, ManualCompactionOption}; -use risingwave_meta::hummock::test_utils::{ - add_ssts, register_table_ids_to_compaction_group, setup_compute_env, - setup_compute_env_with_config, -}; +use risingwave_meta::hummock::compaction::selector::ManualCompactionOption; +use risingwave_meta::hummock::test_utils::{setup_compute_env, setup_compute_env_with_config}; use risingwave_meta::hummock::{HummockManagerRef, MockHummockMetaClient}; -use risingwave_meta::manager::LocalNotification; -use risingwave_pb::hummock::compact_task::TaskStatus; use risingwave_rpc_client::HummockMetaClient; +use risingwave_storage::filter_key_extractor::FilterKeyExtractorManager; use risingwave_storage::hummock::compactor::compactor_runner::compact; use risingwave_storage::hummock::compactor::CompactorContext; use risingwave_storage::hummock::{CachePolicy, GetObjectId, SstableObjectIdManager}; -use risingwave_storage::store::{LocalStateStore, NewLocalOptions, ReadOptions}; +use risingwave_storage::store::{LocalStateStore, NewLocalOptions, ReadOptions, StateStoreRead}; use risingwave_storage::StateStore; use serial_test::serial; -use super::compactor_tests::tests::{ - flush_and_commit, get_hummock_storage, prepare_compactor_and_filter, -}; +use super::compactor_tests::tests::{get_hummock_storage, prepare_compactor_and_filter}; +use crate::compactor_tests::tests::flush_and_commit; use crate::get_notification_client_for_test; use crate::local_state_store_test_utils::LocalStateStoreTestExt; +use crate::test_utils::gen_key_from_bytes; #[tokio::test] #[cfg(feature = "sync_point")] @@ -179,57 +176,10 @@ async fn test_syncpoints_test_failpoints_fetch_ids() { } } -#[tokio::test] -#[cfg(feature = "sync_point")] -#[serial] -async fn test_syncpoints_test_local_notification_receiver() { - let (env, hummock_manager, _cluster_manager, worker_node) = 
setup_compute_env(80).await; - let context_id = worker_node.id; - - register_table_ids_to_compaction_group( - hummock_manager.as_ref(), - &[1], - StaticCompactionGroupId::StateDefault.into(), - ) - .await; - // Test cancel compaction task - let _sst_infos = add_ssts(1, hummock_manager.as_ref(), context_id).await; - let mut task = hummock_manager - .get_compact_task( - StaticCompactionGroupId::StateDefault.into(), - &mut default_level_selector(), - ) - .await - .unwrap() - .unwrap(); - task.task_status = TaskStatus::ManualCanceled as i32; - assert_eq!(hummock_manager.list_all_tasks_ids().await.len(), 1); - env.notification_manager() - .notify_local_subscribers(LocalNotification::CompactionTaskNeedCancel(task)) - .await; - sync_point::wait_timeout( - "AFTER_CANCEL_COMPACTION_TASK_ASYNC", - Duration::from_secs(10), - ) - .await - .unwrap(); - assert_eq!(hummock_manager.list_all_tasks_ids().await.len(), 0); - - // Test release hummock contexts - env.notification_manager() - .notify_local_subscribers(LocalNotification::WorkerNodeDeleted(worker_node)) - .await; - sync_point::wait_timeout( - "AFTER_RELEASE_HUMMOCK_CONTEXTS_ASYNC", - Duration::from_secs(10), - ) - .await - .unwrap(); -} - pub async fn compact_once( hummock_manager_ref: HummockManagerRef, compact_ctx: CompactorContext, + filter_key_extractor_manager: FilterKeyExtractorManager, sstable_object_id_manager: Arc, ) { // 2. get compact task @@ -252,16 +202,22 @@ pub async fn compact_once( compact_task.compaction_filter_mask = compaction_filter_flag.bits(); // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let (mut result_task, task_stats) = compact( + let (result_task, task_stats) = compact( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager), + filter_key_extractor_manager.clone(), ) .await; hummock_manager_ref - .report_compact_task(&mut result_task, Some(to_prost_table_stats_map(task_stats))) + .report_compact_task( + result_task.task_id, + result_task.task_status(), + result_task.sorted_output_ssts, + Some(to_prost_table_stats_map(task_stats)), + ) .await .unwrap(); } @@ -289,7 +245,8 @@ async fn test_syncpoints_get_in_delete_range_boundary() { TableId::from(existing_table_id), ) .await; - let compact_ctx = prepare_compactor_and_filter(&storage, existing_table_id); + let (compact_ctx, filter_key_extractor_manager) = + prepare_compactor_and_filter(&storage, existing_table_id); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -308,25 +265,37 @@ async fn test_syncpoints_get_in_delete_range_boundary() { let val1 = Bytes::from(b"1"[..].repeat(1 << 10)); // 1024 Byte value local.init_for_test(100).await.unwrap(); - let mut start_key = b"\0\0aaa".to_vec(); + let mut start_key = b"aaa".to_vec(); for _ in 0..10 { local .insert( - Bytes::copy_from_slice(start_key.as_slice()), + gen_key_from_bytes(VirtualNode::ZERO, start_key.as_slice()), val0.clone(), None, ) .unwrap(); - start_key = next_key(&start_key); + start_key = next_key(start_key.as_slice()); } local - .insert(Bytes::from(b"\0\0ggg".as_slice()), val0.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"ggg"), + val0.clone(), + None, + ) .unwrap(); local - .insert(Bytes::from(b"\0\0hhh".as_slice()), val0.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"hhh"), + val0.clone(), + None, + ) .unwrap(); local - .insert(Bytes::from(b"\0\0kkk".as_slice()), val0.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"kkk"), + val0.clone(), + 
None, + ) .unwrap(); local.flush(Vec::new()).await.unwrap(); local.seal_current_epoch(101); @@ -334,15 +303,24 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), + filter_key_extractor_manager.clone(), sstable_object_id_manager.clone(), ) .await; local - .insert(Bytes::from(b"\0\0aaa".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"aaa"), + val1.clone(), + None, + ) .unwrap(); local - .insert(Bytes::from(b"\0\0bbb".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"bbb"), + val1.clone(), + None, + ) .unwrap(); local .flush(vec![( @@ -356,15 +334,24 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), + filter_key_extractor_manager.clone(), sstable_object_id_manager.clone(), ) .await; local - .insert(Bytes::from(b"\0\0hhh".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"hhh"), + val1.clone(), + None, + ) .unwrap(); local - .insert(Bytes::from(b"\0\0iii".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"iii"), + val1.clone(), + None, + ) .unwrap(); local .flush(vec![( @@ -379,15 +366,24 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), + filter_key_extractor_manager.clone(), sstable_object_id_manager.clone(), ) .await; local - .insert(Bytes::from(b"\0\0lll".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"lll"), + val1.clone(), + None, + ) .unwrap(); local - .insert(Bytes::from(b"\0\0mmm".as_slice()), val1.clone(), None) + .insert( + gen_key_from_bytes(VirtualNode::ZERO, b"mmm"), + val1.clone(), + None, + ) .unwrap(); local.flush(Vec::new()).await.unwrap(); local.seal_current_epoch(u64::MAX); @@ -396,6 +392,7 @@ async fn test_syncpoints_get_in_delete_range_boundary() { 
compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), + filter_key_extractor_manager.clone(), sstable_object_id_manager.clone(), ) .await; @@ -424,22 +421,38 @@ async fn test_syncpoints_get_in_delete_range_boundary() { ..Default::default() }; let get_result = storage - .get(Bytes::from("\0\0hhh"), 120, read_options.clone()) + .get( + gen_key_from_bytes(VirtualNode::ZERO, b"hhh"), + 120, + read_options.clone(), + ) .await .unwrap(); assert_eq!(get_result.unwrap(), val1); let get_result = storage - .get(Bytes::from("\0\0ggg"), 120, read_options.clone()) + .get( + gen_key_from_bytes(VirtualNode::ZERO, b"ggg"), + 120, + read_options.clone(), + ) .await .unwrap(); assert!(get_result.is_none()); let get_result = storage - .get(Bytes::from("\0\0aaa"), 120, read_options.clone()) + .get( + gen_key_from_bytes(VirtualNode::ZERO, b"aaa"), + 120, + read_options.clone(), + ) .await .unwrap(); assert_eq!(get_result.unwrap(), val1); let get_result = storage - .get(Bytes::from("\0\0aab"), 120, read_options.clone()) + .get( + gen_key_from_bytes(VirtualNode::ZERO, b"aab"), + 120, + read_options.clone(), + ) .await .unwrap(); assert_eq!(get_result.unwrap(), val0); @@ -452,7 +465,11 @@ async fn test_syncpoints_get_in_delete_range_boundary() { } }); let get_result = storage - .get(Bytes::from("\0\0kkk"), 120, read_options.clone()) + .get( + gen_key_from_bytes(VirtualNode::ZERO, b"kkk"), + 120, + read_options.clone(), + ) .await .unwrap(); assert_eq!(get_result.unwrap(), val0); diff --git a/src/storage/hummock_test/src/test_utils.rs b/src/storage/hummock_test/src/test_utils.rs index d69c835930aa2..cd713e3977777 100644 --- a/src/storage/hummock_test/src/test_utils.rs +++ b/src/storage/hummock_test/src/test_utils.rs @@ -18,8 +18,10 @@ use std::sync::Arc; use bytes::Bytes; use itertools::Itertools; use risingwave_common::catalog::TableId; +use risingwave_common::hash::VirtualNode; use risingwave_common_service::observer_manager::ObserverManager; use 
risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; +use risingwave_hummock_sdk::key::TableKey; use risingwave_meta::hummock::test_utils::{ register_table_ids_to_compaction_group, setup_compute_env, }; @@ -48,6 +50,16 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use crate::mock_notification_client::get_notification_client_for_test; +pub fn gen_key_from_bytes(vnode: VirtualNode, payload: &[u8]) -> TableKey { + TableKey(Bytes::from( + [vnode.to_be_bytes().as_slice(), payload].concat(), + )) +} + +pub fn gen_key_from_str(vnode: VirtualNode, payload: &str) -> TableKey { + gen_key_from_bytes(vnode, payload.as_bytes()) +} + pub async fn prepare_first_valid_version( env: MetaSrvEnv, hummock_manager_ref: HummockManagerRef, @@ -91,7 +103,7 @@ pub async fn prepare_first_valid_version( pub trait TestIngestBatch: LocalStateStore { async fn ingest_batch( &mut self, - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> StorageResult; @@ -101,7 +113,7 @@ pub trait TestIngestBatch: LocalStateStore { impl TestIngestBatch for S { async fn ingest_batch( &mut self, - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> StorageResult { diff --git a/src/storage/hummock_trace/Cargo.toml b/src/storage/hummock_trace/Cargo.toml index b9fc5e935c8b4..316447fb41934 100644 --- a/src/storage/hummock_trace/Cargo.toml +++ b/src/storage/hummock_trace/Cargo.toml @@ -14,9 +14,9 @@ bincode = { version = "=2.0.0-rc.3", features = ["serde"] } byteorder = "1" bytes = { version = "1", features = ["serde"] } futures = { version = "0.3", default-features = false, features = ["alloc"] } -futures-async-stream = "0.2" +futures-async-stream = "0.2.9" parking_lot = "0.12" -prost = "0.11" +prost = { workspace = true } risingwave_common = { workspace = true } 
risingwave_hummock_sdk = { workspace = true } risingwave_pb = { workspace = true } diff --git a/src/storage/hummock_trace/src/lib.rs b/src/storage/hummock_trace/src/lib.rs index df757c58cc4fa..8c6c8913205ab 100644 --- a/src/storage/hummock_trace/src/lib.rs +++ b/src/storage/hummock_trace/src/lib.rs @@ -16,7 +16,7 @@ #![feature(cursor_remaining)] #![feature(bound_map)] #![feature(trait_alias)] -#![feature(generators)] +#![feature(coroutines)] mod collector; mod error; diff --git a/src/storage/src/filter_key_extractor.rs b/src/storage/src/filter_key_extractor.rs index 47ce552300e22..b5a79a6f6b42f 100644 --- a/src/storage/src/filter_key_extractor.rs +++ b/src/storage/src/filter_key_extractor.rs @@ -88,8 +88,8 @@ for_all_filter_key_extractor_variants! { impl_filter_key_extractor } pub struct FullKeyFilterKeyExtractor; impl FilterKeyExtractor for FullKeyFilterKeyExtractor { - fn extract<'a>(&self, full_key: &'a [u8]) -> &'a [u8] { - full_key + fn extract<'a>(&self, user_key: &'a [u8]) -> &'a [u8] { + user_key } } @@ -448,7 +448,7 @@ mod tests { use risingwave_common::util::sort_util::OrderType; use risingwave_hummock_sdk::key::TABLE_PREFIX_LEN; use risingwave_pb::catalog::table::TableType; - use risingwave_pb::catalog::PbTable; + use risingwave_pb::catalog::{PbCreateType, PbStreamJobStatus, PbTable}; use risingwave_pb::common::{PbColumnOrder, PbDirection, PbNullsAre, PbOrderType}; use risingwave_pb::plan_common::PbColumnCatalog; @@ -549,6 +549,8 @@ mod tests { cardinality: None, created_at_epoch: None, cleaned_by_watermark: false, + stream_job_status: PbStreamJobStatus::Created.into(), + create_type: PbCreateType::Foreground.into(), } } diff --git a/src/storage/src/hummock/block_cache.rs b/src/storage/src/hummock/block_cache.rs index a9162f037f55a..1260b3c881486 100644 --- a/src/storage/src/hummock/block_cache.rs +++ b/src/storage/src/hummock/block_cache.rs @@ -17,6 +17,7 @@ use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::sync::Arc; +use 
await_tree::InstrumentAwait; use futures::Future; use risingwave_common::cache::{ CachePriority, CacheableEntry, LookupResponse, LruCache, LruCacheEventListener, @@ -99,10 +100,12 @@ impl BlockResponse { match self { BlockResponse::Block(block_holder) => Ok(block_holder), BlockResponse::WaitPendingRequest(receiver) => receiver + .verbose_instrument_await("wait_pending_fetch_block") .await .map_err(|recv_error| recv_error.into()) .map(BlockHolder::from_cached_block), BlockResponse::Miss(join_handle) => join_handle + .verbose_instrument_await("fetch_block") .await .unwrap() .map(BlockHolder::from_cached_block), diff --git a/src/storage/src/hummock/compactor/compaction_utils.rs b/src/storage/src/hummock/compactor/compaction_utils.rs index b1fd6b5411643..3c1332d09317c 100644 --- a/src/storage/src/hummock/compactor/compaction_utils.rs +++ b/src/storage/src/hummock/compactor/compaction_utils.rs @@ -107,7 +107,7 @@ impl CompactionStatistics { } } -#[derive(Clone)] +#[derive(Clone, Default)] pub struct TaskConfig { pub key_range: KeyRange, pub cache_policy: CachePolicy, @@ -158,6 +158,63 @@ pub fn build_multi_compaction_filter(compact_task: &CompactTask) -> MultiCompact multi_filter } +const MAX_FILE_COUNT: usize = 32; + +fn generate_splits_fast( + sstable_infos: &Vec, + compaction_size: u64, + context: CompactorContext, +) -> HummockResult> { + let worker_num = context.compaction_executor.worker_num(); + let parallel_compact_size = (context.storage_opts.parallel_compact_size_mb as u64) << 20; + + let parallelism = (compaction_size + parallel_compact_size - 1) / parallel_compact_size; + + let parallelism = std::cmp::min( + worker_num, + std::cmp::min( + parallelism as usize, + context.storage_opts.max_sub_compaction as usize, + ), + ); + let mut indexes = vec![]; + for sst in sstable_infos { + let key_range = sst.key_range.as_ref().unwrap(); + indexes.push( + FullKey { + user_key: FullKey::decode(&key_range.left).user_key, + epoch: HummockEpoch::MAX, + } + .encode(), + ); 
+ indexes.push( + FullKey { + user_key: FullKey::decode(&key_range.right).user_key, + epoch: HummockEpoch::MAX, + } + .encode(), + ); + } + indexes.sort_by(|a, b| KeyComparator::compare_encoded_full_key(a.as_ref(), b.as_ref())); + indexes.dedup(); + if indexes.len() <= parallelism { + return Ok(vec![]); + } + let mut splits = vec![]; + splits.push(KeyRange_vec::new(vec![], vec![])); + let parallel_key_count = indexes.len() / parallelism; + let mut last_split_key_count = 0; + for key in indexes { + if last_split_key_count >= parallel_key_count { + splits.last_mut().unwrap().right = key.clone(); + splits.push(KeyRange_vec::new(key.clone(), vec![])); + last_split_key_count = 0; + } + last_split_key_count += 1; + } + Ok(splits) +} + pub async fn generate_splits( sstable_infos: &Vec, compaction_size: u64, @@ -165,6 +222,9 @@ pub async fn generate_splits( ) -> HummockResult> { let parallel_compact_size = (context.storage_opts.parallel_compact_size_mb as u64) << 20; if compaction_size > parallel_compact_size { + if sstable_infos.len() > MAX_FILE_COUNT { + return generate_splits_fast(sstable_infos, compaction_size, context); + } let mut indexes = vec![]; // preload the meta and get the smallest key to split sub_compaction for sstable_info in sstable_infos { @@ -193,6 +253,7 @@ pub async fn generate_splits( indexes.sort_by(|a, b| KeyComparator::compare_encoded_full_key(a.1.as_ref(), b.1.as_ref())); let mut splits = vec![]; splits.push(KeyRange_vec::new(vec![], vec![])); + let worker_num = context.compaction_executor.worker_num(); let parallelism = std::cmp::min( diff --git a/src/storage/src/hummock/compactor/compactor_runner.rs b/src/storage/src/hummock/compactor/compactor_runner.rs index be13e32893d64..a21016014d247 100644 --- a/src/storage/src/hummock/compactor/compactor_runner.rs +++ b/src/storage/src/hummock/compactor/compactor_runner.rs @@ -26,19 +26,21 @@ use risingwave_hummock_sdk::key::{FullKey, PointRange}; use risingwave_hummock_sdk::key_range::{KeyRange, 
KeyRangeCommon}; use risingwave_hummock_sdk::table_stats::{add_table_stats_map, TableStats, TableStatsMap}; use risingwave_hummock_sdk::{can_concat, HummockEpoch}; -use risingwave_pb::hummock::compact_task::TaskStatus; -use risingwave_pb::hummock::{CompactTask, LevelType, SstableInfo}; +use risingwave_pb::hummock::compact_task::{TaskStatus, TaskType}; +use risingwave_pb::hummock::{BloomFilterType, CompactTask, LevelType, SstableInfo}; use tokio::sync::oneshot::Receiver; use super::task_progress::TaskProgress; use super::{CompactionStatistics, TaskConfig}; -use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::filter_key_extractor::{FilterKeyExtractorImpl, FilterKeyExtractorManager}; use crate::hummock::compactor::compaction_utils::{ build_multi_compaction_filter, estimate_task_output_capacity, generate_splits, }; use crate::hummock::compactor::iterator::ConcatSstableIterator; use crate::hummock::compactor::task_progress::TaskProgressGuard; -use crate::hummock::compactor::{CompactOutput, CompactionFilter, Compactor, CompactorContext}; +use crate::hummock::compactor::{ + fast_compactor_runner, CompactOutput, CompactionFilter, Compactor, CompactorContext, +}; use crate::hummock::iterator::{Forward, HummockIterator, UnorderedMergeIteratorInner}; use crate::hummock::multi_builder::{CapacitySplitTableBuilder, TableBuilderFactory}; use crate::hummock::sstable::CompactionDeleteRangesBuilder; @@ -138,9 +140,6 @@ impl CompactorRunner { Ok((self.split_index, ssts, compaction_stat)) } - // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. - // TODO: remove `allow` here after the issued is closed. 
- #[expect(clippy::needless_pass_by_ref_mut)] pub async fn build_delete_range_iter( sstable_infos: &Vec, sstable_store: &SstableStoreRef, @@ -150,17 +149,20 @@ impl CompactorRunner { let mut local_stats = StoreLocalStatistic::default(); for table_info in sstable_infos { - let table = sstable_store.sstable(table_info, &mut local_stats).await?; - let mut range_tombstone_list = table.value().meta.monotonic_tombstone_events.clone(); - range_tombstone_list.iter_mut().for_each(|tombstone| { - if filter.should_delete(FullKey::from_user_key( - tombstone.event_key.left_user_key.as_ref(), - tombstone.new_epoch, - )) { - tombstone.new_epoch = HummockEpoch::MAX; - } - }); - builder.add_delete_events(range_tombstone_list); + if table_info.range_tombstone_count > 0 { + let table = sstable_store.sstable(table_info, &mut local_stats).await?; + let mut range_tombstone_list = + table.value().meta.monotonic_tombstone_events.clone(); + range_tombstone_list.iter_mut().for_each(|tombstone| { + if filter.should_delete(FullKey::from_user_key( + tombstone.event_key.left_user_key.as_ref(), + tombstone.new_epoch, + )) { + tombstone.new_epoch = HummockEpoch::MAX; + } + }); + builder.add_delete_events(range_tombstone_list); + } } let aggregator = builder.build_for_compaction(); @@ -242,6 +244,7 @@ pub async fn compact( mut compact_task: CompactTask, mut shutdown_rx: Receiver<()>, object_id_getter: Box, + filter_key_extractor_manager: FilterKeyExtractorManager, ) -> (CompactTask, HashMap) { let context = compactor_context.clone(); let group_label = compact_task.compaction_group_id.to_string(); @@ -305,6 +308,7 @@ pub async fn compact( .collect_vec(); compact_table_ids.sort(); compact_table_ids.dedup(); + let single_table = compact_table_ids.len() == 1; let existing_table_ids: HashSet = HashSet::from_iter(compact_task.existing_table_ids.clone()); @@ -313,8 +317,7 @@ pub async fn compact( .into_iter() .filter(|table_id| existing_table_ids.contains(table_id)), ); - let multi_filter_key_extractor = 
match compactor_context - .filter_key_extractor_manager + let multi_filter_key_extractor = match filter_key_extractor_manager .acquire(compact_table_ids.clone()) .await { @@ -340,7 +343,15 @@ pub async fn compact( } let multi_filter_key_extractor = Arc::new(multi_filter_key_extractor); - + let has_tombstone = compact_task + .input_ssts + .iter() + .flat_map(|level| level.table_infos.iter()) + .any(|sst| sst.range_tombstone_count > 0); + let has_ttl = compact_task + .table_options + .iter() + .any(|(_, table_option)| table_option.retention_seconds > 0); let mut task_status = TaskStatus::Success; // skip sst related to non-existent able_id to reduce io let sstable_infos = compact_task @@ -359,20 +370,31 @@ pub async fn compact( .iter() .map(|table_info| table_info.file_size) .sum::(); - match generate_splits(&sstable_infos, compaction_size, context.clone()).await { - Ok(splits) => { - if !splits.is_empty() { - compact_task.splits = splits; + let all_ssts_are_blocked_filter = sstable_infos + .iter() + .all(|table_info| table_info.bloom_filter_kind() == BloomFilterType::Blocked); + let optimize_by_copy_block = context.storage_opts.enable_fast_compaction + && all_ssts_are_blocked_filter + && !has_tombstone + && !has_ttl + && single_table + && compact_task.target_level > 0 + && compact_task.input_ssts.len() == 2 + && compact_task.task_type() == TaskType::Dynamic; + if !optimize_by_copy_block { + match generate_splits(&sstable_infos, compaction_size, context.clone()).await { + Ok(splits) => { + if !splits.is_empty() { + compact_task.splits = splits; + } + } + Err(e) => { + tracing::warn!("Failed to generate_splits {:#?}", e); + task_status = TaskStatus::ExecuteFailed; + return compact_done(compact_task, context.clone(), vec![], task_status); } - } - - Err(e) => { - tracing::warn!("Failed to generate_splits {:#?}", e); - task_status = TaskStatus::ExecuteFailed; - return compact_done(compact_task, context.clone(), vec![], task_status); } } - let compact_task_statistics = 
statistics_compact_task(&compact_task); // Number of splits (key ranges) is equal to number of compaction tasks let parallelism = compact_task.splits.len(); @@ -411,16 +433,17 @@ pub async fn compact( ) * compact_task.splits.len() as u64; tracing::info!( - "Ready to handle compaction group {} task: {} compact_task_statistics {:?} target_level {} compression_algorithm {:?} table_ids {:?} parallelism {} task_memory_capacity_with_parallelism {}", - compact_task.compaction_group_id, - compact_task.task_id, - compact_task_statistics, - compact_task.target_level, - compact_task.compression_algorithm, - compact_task.existing_table_ids, - parallelism, - task_memory_capacity_with_parallelism - ); + "Ready to handle compaction group {} task: {} compact_task_statistics {:?} target_level {} compression_algorithm {:?} table_ids {:?} parallelism {} task_memory_capacity_with_parallelism {}, enable fast runner: {}", + compact_task.compaction_group_id, + compact_task.task_id, + compact_task_statistics, + compact_task.target_level, + compact_task.compression_algorithm, + compact_task.existing_table_ids, + parallelism, + task_memory_capacity_with_parallelism, + optimize_by_copy_block + ); // If the task does not have enough memory, it should cancel the task and let the meta // reschedule it, so that it does not occupy the compactor's resources. 
@@ -440,6 +463,40 @@ pub async fn compact( } context.compactor_metrics.compact_task_pending_num.inc(); + if optimize_by_copy_block { + let runner = fast_compactor_runner::CompactorRunner::new( + context.clone(), + compact_task.clone(), + multi_filter_key_extractor.clone(), + object_id_getter.clone(), + task_progress_guard.progress.clone(), + ); + match runner.run().await { + Ok(ssts) => { + output_ssts.push((0, ssts, CompactionStatistics::default())); + } + Err(e) => { + task_status = TaskStatus::ExecuteFailed; + tracing::warn!( + "Compaction task {} failed with error: {:#?}", + compact_task.task_id, + e + ); + } + } + + context.compactor_metrics.compact_task_pending_num.dec(); + // After a compaction is done, mutate the compaction task. + let (compact_task, table_stats) = + compact_done(compact_task, context.clone(), output_ssts, task_status); + let cost_time = timer.stop_and_record() * 1000.0; + tracing::info!( + "Finished compaction task in {:?}ms: {}", + cost_time, + compact_task_to_string(&compact_task) + ); + return (compact_task, table_stats); + } for (split_index, _) in compact_task.splits.iter().enumerate() { let filter = multi_filter.clone(); let multi_filter_key_extractor = multi_filter_key_extractor.clone(); @@ -645,9 +702,9 @@ where progress_key_num += 1; if let Some(task_progress) = task_progress.as_ref() && progress_key_num >= PROGRESS_KEY_INTERVAL { - task_progress.inc_progress_key(progress_key_num); - progress_key_num = 0; - } + task_progress.inc_progress_key(progress_key_num); + progress_key_num = 0; + } let mut iter_key = iter.key(); compaction_statistics.iter_total_key_counts += 1; @@ -693,7 +750,13 @@ where .await?; } del_iter.next(); + progress_key_num += 1; + if let Some(task_progress) = task_progress.as_ref() && progress_key_num >= PROGRESS_KEY_INTERVAL { + task_progress.inc_progress_key(progress_key_num); + progress_key_num = 0; + } } + let earliest_range_delete_which_can_see_iter_key = del_iter.earliest_delete_since(epoch); // Among keys 
with same user key, only retain keys which satisfy `epoch` >= `watermark`. @@ -794,13 +857,18 @@ where }) .await?; del_iter.next(); + progress_key_num += 1; + if let Some(task_progress) = task_progress.as_ref() && progress_key_num >= PROGRESS_KEY_INTERVAL { + task_progress.inc_progress_key(progress_key_num); + progress_key_num = 0; + } } } if let Some(task_progress) = task_progress.as_ref() && progress_key_num > 0 { - // Avoid losing the progress_key_num in the last Interval - task_progress.inc_progress_key(progress_key_num); - } + // Avoid losing the progress_key_num in the last Interval + task_progress.inc_progress_key(progress_key_num); + } if let Some(last_table_id) = last_table_id.take() { table_stats_drop.insert(last_table_id, std::mem::take(&mut last_table_stats)); @@ -811,6 +879,7 @@ where Ok(compaction_statistics) } + #[cfg(test)] mod tests { use std::collections::HashSet; @@ -822,10 +891,8 @@ mod tests { use super::*; use crate::hummock::compactor::StateCleanUpCompactionFilter; use crate::hummock::iterator::test_utils::mock_sstable_store; - use crate::hummock::test_utils::{ - default_builder_opt_for_test, gen_test_sstable_with_range_tombstone, - }; - use crate::hummock::{create_monotonic_events, DeleteRangeTombstone}; + use crate::hummock::test_utils::{default_builder_opt_for_test, gen_test_sstable_impl}; + use crate::hummock::{create_monotonic_events, DeleteRangeTombstone, Xor16FilterBuilder}; #[tokio::test] async fn test_delete_range_aggregator_with_filter() { @@ -845,26 +912,26 @@ mod tests { 1, ), ]; - let mut sstable_info_1 = gen_test_sstable_with_range_tombstone( + let mut sstable_info_1 = gen_test_sstable_impl::( default_builder_opt_for_test(), 1, kv_pairs.clone().into_iter(), range_tombstones.clone(), sstable_store.clone(), + CachePolicy::NotFill, ) - .await - .get_sstable_info(); + .await; sstable_info_1.table_ids = vec![1]; - let mut sstable_info_2 = gen_test_sstable_with_range_tombstone( + let mut sstable_info_2 = gen_test_sstable_impl::( 
default_builder_opt_for_test(), 2, kv_pairs.into_iter(), range_tombstones.clone(), sstable_store.clone(), + CachePolicy::NotFill, ) - .await - .get_sstable_info(); + .await; sstable_info_2.table_ids = vec![2]; let compact_task = CompactTask { diff --git a/src/storage/src/hummock/compactor/context.rs b/src/storage/src/hummock/compactor/context.rs index ad3d5ffcc2dd6..ef015f26cded7 100644 --- a/src/storage/src/hummock/compactor/context.rs +++ b/src/storage/src/hummock/compactor/context.rs @@ -18,7 +18,6 @@ use std::sync::Arc; use parking_lot::RwLock; use super::task_progress::TaskProgressManagerRef; -use crate::filter_key_extractor::FilterKeyExtractorManager; use crate::hummock::compactor::CompactionExecutor; use crate::hummock::sstable_store::SstableStoreRef; use crate::hummock::MemoryLimiter; @@ -42,8 +41,6 @@ pub struct CompactorContext { pub compaction_executor: Arc, - pub filter_key_extractor_manager: FilterKeyExtractorManager, - pub memory_limiter: Arc, pub task_progress_manager: TaskProgressManagerRef, @@ -58,7 +55,6 @@ impl CompactorContext { storage_opts: Arc, sstable_store: SstableStoreRef, compactor_metrics: Arc, - filter_key_extractor_manager: FilterKeyExtractorManager, ) -> Self { let compaction_executor = if storage_opts.share_buffer_compaction_worker_threads_number == 0 { @@ -76,7 +72,6 @@ impl CompactorContext { compactor_metrics, is_share_buffer_compact: true, compaction_executor, - filter_key_extractor_manager, memory_limiter: MemoryLimiter::unlimit(), task_progress_manager: Default::default(), await_tree_reg: None, diff --git a/src/storage/src/hummock/compactor/fast_compactor_runner.rs b/src/storage/src/hummock/compactor/fast_compactor_runner.rs new file mode 100644 index 0000000000000..6dcfb0e2392cf --- /dev/null +++ b/src/storage/src/hummock/compactor/fast_compactor_runner.rs @@ -0,0 +1,576 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cmp::Ordering; +use std::collections::HashSet; +use std::marker::PhantomData; +use std::sync::atomic::AtomicU64; +use std::sync::{atomic, Arc}; +use std::time::Instant; + +use await_tree::InstrumentAwait; +use bytes::Bytes; +use itertools::Itertools; +use risingwave_hummock_sdk::key::FullKey; +use risingwave_hummock_sdk::key_range::KeyRange; +use risingwave_hummock_sdk::{can_concat, HummockEpoch, LocalSstableInfo}; +use risingwave_pb::hummock::{CompactTask, SstableInfo}; + +use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::hummock::compactor::task_progress::TaskProgress; +use crate::hummock::compactor::{Compactor, CompactorContext, RemoteBuilderFactory, TaskConfig}; +use crate::hummock::multi_builder::{CapacitySplitTableBuilder, TableBuilderFactory}; +use crate::hummock::sstable_store::{BlockStream, SstableStoreRef}; +use crate::hummock::value::HummockValue; +use crate::hummock::{ + Block, BlockBuilder, BlockHolder, BlockIterator, BlockMeta, BlockedXor16FilterBuilder, + CachePolicy, CompressionAlgorithm, GetObjectId, HummockResult, SstableBuilderOptions, + StreamingSstableWriterFactory, TableHolder, +}; +use crate::monitor::{CompactorMetrics, StoreLocalStatistic}; + +/// Iterates over the KV-pairs of an SST while downloading it. +pub struct BlockStreamIterator { + /// The downloading stream. 
+ block_stream: BlockStream, + + next_block_index: usize, + + /// For key sanity check of divided SST and debugging + sstable: TableHolder, + iter: Option, + task_progress: Arc, +} + +impl BlockStreamIterator { + // We have to handle two internal iterators. + // `block_stream`: iterates over the blocks of the table. + // `block_iter`: iterates over the KV-pairs of the current block. + // These iterators work in different ways. + + // BlockIterator works as follows: After new(), we call seek(). That brings us + // to the first element. Calling next() then brings us to the second element and does not + // return anything. + + // BlockStream follows a different approach. After new(), we do not seek, instead next() + // returns the first value. + + /// Initialises a new [`BlockStreamIterator`] which iterates over the given [`BlockStream`]. + /// The iterator reads at most `max_block_count` from the stream. + pub fn new( + sstable: TableHolder, + block_stream: BlockStream, + task_progress: Arc, + ) -> Self { + Self { + block_stream, + next_block_index: 0, + sstable, + iter: None, + task_progress, + } + } + + /// Wrapper function for `self.block_stream.next()` which allows us to measure the time needed. + async fn download_next_block(&mut self) -> HummockResult, BlockMeta)>> { + let (data, meta) = match self.block_stream.next().await? 
{ + None => return Ok(None), + Some(ret) => ret, + }; + let filter_block = self + .sstable + .value() + .filter_reader + .get_block_raw_filter(self.next_block_index); + self.next_block_index += 1; + Ok(Some((data, filter_block, meta))) + } + + fn init_block_iter(&mut self, buf: Bytes, uncompressed_capacity: usize) -> HummockResult<()> { + let block = Block::decode(buf, uncompressed_capacity)?; + let mut iter = BlockIterator::new(BlockHolder::from_owned_block(Box::new(block))); + iter.seek_to_first(); + self.iter = Some(iter); + Ok(()) + } + + fn next_block_smallest(&self) -> &[u8] { + self.sstable.value().meta.block_metas[self.next_block_index] + .smallest_key + .as_ref() + } + + fn next_block_largest(&self) -> &[u8] { + if self.next_block_index + 1 < self.sstable.value().meta.block_metas.len() { + self.sstable.value().meta.block_metas[self.next_block_index + 1] + .smallest_key + .as_ref() + } else { + self.sstable.value().meta.largest_key.as_ref() + } + } + + fn current_block_largest(&self) -> Vec { + if self.next_block_index < self.sstable.value().meta.block_metas.len() { + let mut largest_key = FullKey::decode( + self.sstable.value().meta.block_metas[self.next_block_index] + .smallest_key + .as_ref(), + ); + // do not include this key because it is the smallest key of next block. 
+ largest_key.epoch = HummockEpoch::MAX; + largest_key.encode() + } else { + self.sstable.value().meta.largest_key.clone() + } + } + + fn key(&self) -> FullKey<&[u8]> { + match self.iter.as_ref() { + Some(iter) => iter.key(), + None => FullKey::decode( + self.sstable.value().meta.block_metas[self.next_block_index] + .smallest_key + .as_ref(), + ), + } + } + + fn is_valid(&self) -> bool { + self.iter.is_some() || self.next_block_index < self.sstable.value().meta.block_metas.len() + } +} + +impl Drop for BlockStreamIterator { + fn drop(&mut self) { + self.task_progress + .num_pending_read_io + .fetch_sub(1, std::sync::atomic::Ordering::SeqCst); + } +} + +/// Iterates over the KV-pairs of a given list of SSTs. The key-ranges of these SSTs are assumed to +/// be consecutive and non-overlapping. +pub struct ConcatSstableIterator { + /// The iterator of the current table. + sstable_iter: Option, + + /// Current table index. + cur_idx: usize, + + /// All non-overlapping tables. + sstables: Vec, + + sstable_store: SstableStoreRef, + + stats: StoreLocalStatistic, + task_progress: Arc, +} + +impl ConcatSstableIterator { + /// Caller should make sure that `tables` are non-overlapping, + /// arranged in ascending order when it serves as a forward iterator, + /// and arranged in descending order when it serves as a backward iterator. 
+ pub fn new( + sst_infos: Vec, + sstable_store: SstableStoreRef, + task_progress: Arc, + ) -> Self { + Self { + sstable_iter: None, + cur_idx: 0, + sstables: sst_infos, + sstable_store, + task_progress, + stats: StoreLocalStatistic::default(), + } + } + + pub async fn rewind(&mut self) -> HummockResult<()> { + self.seek_idx(0).await + } + + pub async fn next_sstable(&mut self) -> HummockResult<()> { + self.seek_idx(self.cur_idx + 1).await + } + + pub fn current_sstable(&mut self) -> &mut BlockStreamIterator { + self.sstable_iter.as_mut().unwrap() + } + + pub fn estimate_key_count(&self, uncompressed_block_size: u64) -> (u64, u64) { + let total_size = self.sstables[self.cur_idx].uncompressed_file_size; + if total_size == 0 { + return (0, 0); + } + // use ratio to avoid multiply overflow + let ratio = uncompressed_block_size * 10000 / total_size; + ( + self.sstables[self.cur_idx].stale_key_count * ratio / 10000, + self.sstables[self.cur_idx].total_key_count * ratio / 10000, + ) + } + + pub async fn init_block_iter(&mut self) -> HummockResult<()> { + if let Some(sstable) = self.sstable_iter.as_mut() { + if sstable.iter.is_some() { + return Ok(()); + } + let (buf, _, meta) = sstable.download_next_block().await?.unwrap(); + sstable.init_block_iter(buf, meta.uncompressed_size as usize)?; + } + Ok(()) + } + + pub fn is_valid(&self) -> bool { + self.cur_idx < self.sstables.len() + } + + /// Resets the iterator, loads the specified SST, and seeks in that SST to `seek_key` if given. 
+ async fn seek_idx(&mut self, idx: usize) -> HummockResult<()> { + self.sstable_iter.take(); + self.cur_idx = idx; + if self.cur_idx < self.sstables.len() { + let sstable_info = &self.sstables[self.cur_idx]; + let sstable = self + .sstable_store + .sstable(sstable_info, &mut self.stats) + .verbose_instrument_await("stream_iter_sstable") + .await?; + let stats_ptr = self.stats.remote_io_time.clone(); + let now = Instant::now(); + self.task_progress + .num_pending_read_io + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + let block_stream = self + .sstable_store + .get_stream_by_position(sstable.value().id, 0, &sstable.value().meta.block_metas) + .verbose_instrument_await("stream_iter_get_stream") + .await?; + + // Determine time needed to open stream. + let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); + stats_ptr.fetch_add(add as u64, atomic::Ordering::Relaxed); + + let sstable_iter = + BlockStreamIterator::new(sstable, block_stream, self.task_progress.clone()); + self.sstable_iter = Some(sstable_iter); + } + Ok(()) + } +} + +pub struct CompactorRunner { + left: Box, + right: Box, + task_id: u64, + executor: CompactTaskExecutor< + RemoteBuilderFactory, + >, + compression_algorithm: CompressionAlgorithm, + metrics: Arc, + task_progress: Arc, +} + +impl CompactorRunner { + pub fn new( + context: CompactorContext, + task: CompactTask, + filter_key_extractor: Arc, + object_id_getter: Box, + task_progress: Arc, + ) -> Self { + let mut options: SstableBuilderOptions = context.storage_opts.as_ref().into(); + let compression_algorithm: CompressionAlgorithm = task.compression_algorithm.into(); + options.compression_algorithm = compression_algorithm; + options.capacity = task.target_file_size as usize; + let get_id_time = Arc::new(AtomicU64::new(0)); + + let key_range = KeyRange::inf(); + + let task_config = TaskConfig { + key_range, + cache_policy: CachePolicy::NotFill, + gc_delete_keys: task.gc_delete_keys, + watermark: task.watermark, + 
stats_target_table_ids: Some(HashSet::from_iter(task.existing_table_ids.clone())), + task_type: task.task_type(), + is_target_l0_or_lbase: task.target_level == 0 || task.target_level == task.base_level, + split_by_table: task.split_by_state_table, + split_weight_by_vnode: task.split_weight_by_vnode, + use_block_based_filter: true, + }; + let factory = StreamingSstableWriterFactory::new(context.sstable_store.clone()); + let builder_factory = RemoteBuilderFactory::<_, BlockedXor16FilterBuilder> { + object_id_getter, + limiter: context.memory_limiter.clone(), + options, + policy: task_config.cache_policy, + remote_rpc_cost: get_id_time, + filter_key_extractor, + sstable_writer_factory: factory, + _phantom: PhantomData, + }; + let sst_builder = CapacitySplitTableBuilder::new( + builder_factory, + context.compactor_metrics.clone(), + Some(task_progress.clone()), + task_config.is_target_l0_or_lbase, + task_config.split_by_table, + task_config.split_weight_by_vnode, + ); + assert_eq!(task.input_ssts.len(), 2); + let left = Box::new(ConcatSstableIterator::new( + task.input_ssts[0].table_infos.clone(), + context.sstable_store.clone(), + task_progress.clone(), + )); + let right = Box::new(ConcatSstableIterator::new( + task.input_ssts[1].table_infos.clone(), + context.sstable_store, + task_progress.clone(), + )); + + Self { + executor: CompactTaskExecutor::new(sst_builder, task_config), + left, + right, + task_id: task.task_id, + metrics: context.compactor_metrics.clone(), + compression_algorithm, + task_progress, + } + } + + pub async fn run(mut self) -> HummockResult> { + self.left.rewind().await?; + self.right.rewind().await?; + let mut skip_raw_block_count = 0; + let mut skip_raw_block_size = 0; + while self.left.is_valid() && self.right.is_valid() { + let ret = self + .left + .current_sstable() + .key() + .cmp(&self.right.current_sstable().key()); + let (first, second) = if ret == Ordering::Less { + (&mut self.left, &mut self.right) + } else { + (&mut self.right, &mut 
self.left) + }; + assert!(ret != Ordering::Equal); + if first.current_sstable().iter.is_none() { + let right_key = second.current_sstable().key(); + while first.current_sstable().is_valid() { + let full_key = FullKey::decode(first.current_sstable().next_block_largest()); + // the full key may be either Excluded key or Included key, so we do not allow + // they equals. + if full_key.user_key.ge(&right_key.user_key) { + break; + } + let smallest_key = + FullKey::decode(first.current_sstable().next_block_smallest()); + if self + .executor + .last_key + .user_key + .as_ref() + .eq(&smallest_key.user_key) + { + // If the last key is delete tombstone, we can not append the origin block + // because it would cause a deleted key could be see by user again. + break; + } + let smallest_key = smallest_key.to_vec(); + + let (mut block, filter_data, mut meta) = first + .current_sstable() + .download_next_block() + .await? + .unwrap(); + let algorithm = Block::get_algorithm(&block)?; + if algorithm == CompressionAlgorithm::None + && algorithm != self.compression_algorithm + { + block = BlockBuilder::compress_block(block, self.compression_algorithm)?; + meta.len = block.len() as u32; + } + + let largest_key = first.current_sstable().current_block_largest(); + let block_len = block.len() as u64; + + if self + .executor + .builder + .add_raw_block(block, filter_data, smallest_key, largest_key, meta) + .await? 
+ { + skip_raw_block_size += block_len; + skip_raw_block_count += 1; + } + self.executor.clear(); + } + if !first.current_sstable().is_valid() { + first.next_sstable().await?; + continue; + } + first.init_block_iter().await?; + } + + let target_key = second.current_sstable().key(); + let iter = first.sstable_iter.as_mut().unwrap().iter.as_mut().unwrap(); + self.executor.run(iter, target_key).await?; + if !iter.is_valid() { + first.sstable_iter.as_mut().unwrap().iter.take(); + if !first.current_sstable().is_valid() { + first.next_sstable().await?; + } + } + } + let rest_data = if !self.left.is_valid() { + &mut self.right + } else { + &mut self.left + }; + if rest_data.is_valid() { + // compact rest keys of the current block. + let sstable_iter = rest_data.sstable_iter.as_mut().unwrap(); + let target_key = FullKey::decode(&sstable_iter.sstable.value().meta.largest_key); + if let Some(iter) = sstable_iter.iter.as_mut() { + self.executor.run(iter, target_key).await?; + assert!(!iter.is_valid()); + } + sstable_iter.iter.take(); + } + + while rest_data.is_valid() { + let mut sstable_iter = rest_data.sstable_iter.take().unwrap(); + while sstable_iter.is_valid() { + let smallest_key = FullKey::decode(sstable_iter.next_block_smallest()).to_vec(); + let (block, filter_data, block_meta) = + sstable_iter.download_next_block().await?.unwrap(); + let largest_key = sstable_iter.current_block_largest(); + let block_len = block.len() as u64; + if self + .executor + .builder + .add_raw_block(block, filter_data, smallest_key, largest_key, block_meta) + .await? 
+ { + skip_raw_block_count += 1; + skip_raw_block_size += block_len; + } + } + rest_data.next_sstable().await?; + } + let mut total_read_bytes = 0; + for sst in &self.left.sstables { + total_read_bytes += sst.file_size; + } + for sst in &self.right.sstables { + total_read_bytes += sst.file_size; + } + self.metrics + .compact_fast_runner_bytes + .inc_by(skip_raw_block_size); + tracing::info!( + "OPTIMIZATION: skip {} blocks for task-{}, optimize {}% data compression", + skip_raw_block_count, + self.task_id, + skip_raw_block_size * 100 / total_read_bytes, + ); + + let outputs = self.executor.builder.finish().await?; + let ssts = Compactor::report_progress( + self.metrics.clone(), + Some(self.task_progress.clone()), + outputs, + false, + ) + .await?; + let sst_infos = ssts.iter().map(|sst| sst.sst_info.clone()).collect_vec(); + assert!(can_concat(&sst_infos)); + Ok(ssts) + } +} + +pub struct CompactTaskExecutor { + last_key: FullKey>, + watermark_can_see_last_key: bool, + builder: CapacitySplitTableBuilder, + task_config: TaskConfig, + last_key_is_delete: bool, +} + +impl CompactTaskExecutor { + pub fn new(builder: CapacitySplitTableBuilder, task_config: TaskConfig) -> Self { + Self { + builder, + task_config, + last_key: FullKey::default(), + watermark_can_see_last_key: false, + last_key_is_delete: false, + } + } + + fn clear(&mut self) { + if !self.last_key.is_empty() { + self.last_key = FullKey::default(); + } + self.watermark_can_see_last_key = false; + self.last_key_is_delete = false; + } + + pub async fn run( + &mut self, + iter: &mut BlockIterator, + target_key: FullKey<&[u8]>, + ) -> HummockResult<()> { + while iter.is_valid() && iter.key().le(&target_key) { + let is_new_user_key = + !self.last_key.is_empty() && iter.key().user_key != self.last_key.user_key.as_ref(); + let mut drop = false; + let epoch = iter.key().epoch; + let value = HummockValue::from_slice(iter.value()).unwrap(); + if is_new_user_key || self.last_key.is_empty() { + 
self.last_key.set(iter.key()); + self.watermark_can_see_last_key = false; + self.last_key_is_delete = false; + } + if epoch <= self.task_config.watermark + && self.task_config.gc_delete_keys + && value.is_delete() + { + drop = true; + self.last_key_is_delete = true; + } else if epoch < self.task_config.watermark && self.watermark_can_see_last_key { + drop = true; + } + + if epoch <= self.task_config.watermark { + self.watermark_can_see_last_key = true; + } + + if drop { + iter.next(); + continue; + } + self.builder + .add_full_key(iter.key(), value, is_new_user_key) + .await?; + iter.next(); + } + Ok(()) + } +} diff --git a/src/storage/src/hummock/compactor/iterator.rs b/src/storage/src/hummock/compactor/iterator.rs index 60c775bbc4ae4..f925d871a610e 100644 --- a/src/storage/src/hummock/compactor/iterator.rs +++ b/src/storage/src/hummock/compactor/iterator.rs @@ -159,7 +159,7 @@ impl SstableStreamIterator { loop { let now = Instant::now(); let ret = match &mut self.block_stream { - Some(block_stream) => block_stream.next().await, + Some(block_stream) => block_stream.next_block().await, None => { self.create_stream().await?; continue; @@ -510,7 +510,7 @@ mod tests { use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::iterator::HummockIterator; use crate::hummock::test_utils::{ - default_builder_opt_for_test, gen_test_sstable_and_info, test_key_of, test_value_of, + default_builder_opt_for_test, gen_test_sstable_info, test_key_of, test_value_of, TEST_KEYS_COUNT, }; use crate::hummock::value::HummockValue; @@ -522,7 +522,7 @@ mod tests { for object_id in 0..3 { let start_index = object_id * TEST_KEYS_COUNT; let end_index = (object_id + 1) * TEST_KEYS_COUNT; - let (_table, table_info) = gen_test_sstable_and_info( + let table_info = gen_test_sstable_info( default_builder_opt_for_test(), object_id as u64, (start_index..end_index) @@ -642,7 +642,7 @@ mod tests { for object_id in 0..3 { let start_index = object_id * TEST_KEYS_COUNT + 
TEST_KEYS_COUNT / 2; let end_index = (object_id + 1) * TEST_KEYS_COUNT; - let (_table, table_info) = gen_test_sstable_and_info( + let table_info = gen_test_sstable_info( default_builder_opt_for_test(), object_id as u64, (start_index..end_index) diff --git a/src/storage/src/hummock/compactor/mod.rs b/src/storage/src/hummock/compactor/mod.rs index c9d9d43c38785..137682d6f7825 100644 --- a/src/storage/src/hummock/compactor/mod.rs +++ b/src/storage/src/hummock/compactor/mod.rs @@ -15,13 +15,24 @@ mod compaction_executor; mod compaction_filter; pub mod compaction_utils; +use risingwave_pb::compactor::{dispatch_compaction_task_request, DispatchCompactionTaskRequest}; +use risingwave_pb::hummock::report_compaction_task_request::{ + Event as ReportCompactionTaskEvent, HeartBeat as SharedHeartBeat, + ReportTask as ReportSharedTask, +}; +use risingwave_pb::hummock::{ReportFullScanTaskRequest, ReportVacuumTaskRequest}; +use risingwave_rpc_client::GrpcCompactorProxyClient; +use tokio::sync::mpsc; +use tonic::Request; + pub mod compactor_runner; mod context; +pub mod fast_compactor_runner; mod iterator; mod shared_buffer_compact; pub(super) mod task_progress; -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::marker::PhantomData; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; @@ -34,7 +45,6 @@ pub use compaction_filter::{ TtlCompactionFilter, }; pub use context::CompactorContext; -use futures::future::try_join_all; use futures::{pin_mut, StreamExt}; pub use iterator::{ConcatSstableIterator, SstableStreamIterator}; use more_asserts::assert_ge; @@ -46,7 +56,8 @@ use risingwave_pb::hummock::subscribe_compaction_event_request::{ }; use risingwave_pb::hummock::subscribe_compaction_event_response::Event as ResponseEvent; use risingwave_pb::hummock::{ - CompactTaskProgress, SubscribeCompactionEventRequest, SubscribeCompactionEventResponse, + CompactTaskProgress, ReportCompactionTaskRequest, 
SubscribeCompactionEventRequest, + SubscribeCompactionEventResponse, }; use risingwave_rpc_client::HummockMetaClient; pub use shared_buffer_compact::{compact, merge_imms_in_memory}; @@ -61,15 +72,19 @@ use super::{ CompactionDeleteRanges, GetObjectId, HummockResult, SstableBuilderOptions, SstableObjectIdManager, Xor16FilterBuilder, }; -use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::filter_key_extractor::{ + FilterKeyExtractorImpl, FilterKeyExtractorManager, StaticFilterKeyExtractorManager, +}; use crate::hummock::compactor::compactor_runner::compact_and_build_sst; use crate::hummock::iterator::{Forward, HummockIterator}; use crate::hummock::multi_builder::SplitTableOutput; use crate::hummock::vacuum::Vacuum; use crate::hummock::{ validate_ssts, BatchSstableWriterFactory, BlockedXor16FilterBuilder, FilterBuilder, - HummockError, SstableWriterFactory, StreamingSstableWriterFactory, + HummockError, SharedComapctorObjectIdManager, SstableWriterFactory, + StreamingSstableWriterFactory, }; +use crate::monitor::CompactorMetrics; /// Implementation of Hummock compaction. 
pub struct Compactor { @@ -190,49 +205,14 @@ impl Compactor { compact_timer.observe_duration(); - let mut ssts = Vec::with_capacity(split_table_outputs.len()); - let mut upload_join_handles = vec![]; - - for SplitTableOutput { - sst_info, - upload_join_handle, - } in split_table_outputs - { - let sst_size = sst_info.file_size(); - ssts.push(sst_info); - - let tracker_cloned = task_progress.clone(); - let context_cloned = self.context.clone(); - upload_join_handles.push(async move { - upload_join_handle - .verbose_instrument_await("upload") - .await - .map_err(HummockError::sstable_upload_error)??; - if let Some(tracker) = tracker_cloned { - tracker.inc_ssts_uploaded(); - tracker - .num_pending_write_io - .fetch_sub(1, std::sync::atomic::Ordering::SeqCst); - } - if context_cloned.is_share_buffer_compact { - context_cloned - .compactor_metrics - .shared_buffer_to_sstable_size - .observe(sst_size as _); - } else { - context_cloned - .compactor_metrics - .compaction_upload_sst_counts - .inc(); - } - Ok::<_, HummockError>(()) - }); - } + let ssts = Self::report_progress( + self.context.compactor_metrics.clone(), + task_progress, + split_table_outputs, + self.context.is_share_buffer_compact, + ) + .await?; - // Check if there are any failed uploads. Report all of those SSTs. 
- try_join_all(upload_join_handles) - .verbose_instrument_await("join") - .await?; self.context .compactor_metrics .get_table_id_total_time_duration @@ -254,6 +234,45 @@ impl Compactor { Ok((ssts, table_stats_map)) } + pub async fn report_progress( + metrics: Arc, + task_progress: Option>, + split_table_outputs: Vec, + is_share_buffer_compact: bool, + ) -> HummockResult> { + let mut ssts = Vec::with_capacity(split_table_outputs.len()); + let mut rets = vec![]; + + for SplitTableOutput { + sst_info, + upload_join_handle, + } in split_table_outputs + { + let sst_size = sst_info.file_size(); + ssts.push(sst_info); + let ret = upload_join_handle + .verbose_instrument_await("upload") + .await + .map_err(HummockError::sstable_upload_error); + rets.push(ret); + if let Some(tracker) = &task_progress { + tracker.inc_ssts_uploaded(); + tracker + .num_pending_write_io + .fetch_sub(1, std::sync::atomic::Ordering::SeqCst); + } + if is_share_buffer_compact { + metrics.shared_buffer_to_sstable_size.observe(sst_size as _); + } else { + metrics.compaction_upload_sst_counts.inc(); + } + } + for ret in rets { + ret??; + } + Ok(ssts) + } + async fn compact_key_range_impl( &self, writer_factory: F, @@ -306,11 +325,12 @@ impl Compactor { /// The background compaction thread that receives compaction tasks from hummock compaction /// manager and runs compaction tasks. -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] pub fn start_compactor( compactor_context: CompactorContext, hummock_meta_client: Arc, sstable_object_id_manager: Arc, + filter_key_extractor_manager: FilterKeyExtractorManager, ) -> (JoinHandle<()>, Sender<()>) { type CompactionShutdownMap = Arc>>>; let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); @@ -386,17 +406,7 @@ pub fn start_compactor( let request_sender = request_sender.clone(); let event: Option> = tokio::select! 
{ _ = periodic_event_interval.tick() => { - let mut progress_list = Vec::new(); - for (&task_id, progress) in &*task_progress.lock() { - progress_list.push(CompactTaskProgress { - task_id, - num_ssts_sealed: progress.num_ssts_sealed.load(Ordering::Relaxed), - num_ssts_uploaded: progress.num_ssts_uploaded.load(Ordering::Relaxed), - num_progress_key: progress.num_progress_key.load(Ordering::Relaxed), - num_pending_read_io: progress.num_pending_read_io.load(Ordering::Relaxed) as u64, - num_pending_write_io: progress.num_pending_write_io.load(Ordering::Relaxed) as u64, - }); - } + let progress_list = get_task_progress(task_progress.clone()); if let Err(e) = request_sender.send(SubscribeCompactionEventRequest { event: Some(RequestEvent::HeartBeat( @@ -453,7 +463,6 @@ pub fn start_compactor( continue; } - event = response_event_stream.next() => { event } @@ -484,6 +493,7 @@ pub fn start_compactor( let meta_client = hummock_meta_client.clone(); let sstable_object_id_manager = sstable_object_id_manager.clone(); + let filter_key_extractor_manager = filter_key_extractor_manager.clone(); executor.spawn(async move { let running_task_count = running_task_count.clone(); match event { @@ -502,7 +512,7 @@ pub fn start_compactor( sstable_object_id_manager.remove_watermark_object_id(tracker_id); }, ); - compactor_runner::compact(context, compact_task, rx, Box::new(sstable_object_id_manager.clone())).await + compactor_runner::compact(context, compact_task, rx, Box::new(sstable_object_id_manager.clone()), filter_key_extractor_manager.clone()).await }, Err(err) => { tracing::warn!("Failed to track pending SST object id. 
{:#?}", err); @@ -518,7 +528,9 @@ pub fn start_compactor( if let Err(e) = request_sender.send(SubscribeCompactionEventRequest { event: Some(RequestEvent::ReportTask( ReportTask { - compact_task: Some(compact_task), + task_id: compact_task.task_id, + task_status: compact_task.task_status, + sorted_output_ssts: compact_task.sorted_output_ssts, table_stats_change:to_prost_table_stats_map(table_stats), } )), @@ -603,3 +615,204 @@ pub fn start_compactor( (join_handle, shutdown_tx) } + +/// The background compaction thread that receives compaction tasks from hummock compaction +/// manager and runs compaction tasks. +#[cfg_attr(coverage, coverage(off))] +pub fn start_shared_compactor( + grpc_proxy_client: GrpcCompactorProxyClient, + mut receiver: mpsc::UnboundedReceiver>, + context: CompactorContext, +) -> (JoinHandle<()>, Sender<()>) { + type CompactionShutdownMap = Arc>>>; + let task_progress = context.task_progress_manager.clone(); + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); + let periodic_event_update_interval = Duration::from_millis(1000); + + let join_handle = tokio::spawn(async move { + let shutdown_map = CompactionShutdownMap::default(); + + let mut periodic_event_interval = tokio::time::interval(periodic_event_update_interval); + let executor = context.compaction_executor.clone(); + let report_heartbeat_client = grpc_proxy_client.clone(); + 'consume_stream: loop { + let request: Option> = tokio::select! 
{ + _ = periodic_event_interval.tick() => { + let progress_list = get_task_progress(task_progress.clone()); + let report_compaction_task_request = ReportCompactionTaskRequest{ + event: Some(ReportCompactionTaskEvent::HeartBeat( + SharedHeartBeat { + progress: progress_list + } + )), + }; + if let Err(e) = report_heartbeat_client.report_compaction_task(report_compaction_task_request).await{ + tracing::warn!("Failed to report heartbeat {:#?}", e); + } + continue + } + + + _ = &mut shutdown_rx => { + tracing::info!("Compactor is shutting down"); + return + } + + request = receiver.recv() => { + request + } + + }; + match request { + Some(request) => { + let context = context.clone(); + let shutdown = shutdown_map.clone(); + + let cloned_grpc_proxy_client = grpc_proxy_client.clone(); + executor.spawn(async move { + let DispatchCompactionTaskRequest { + tables, + output_object_ids, + task: dispatch_task, + } = request.into_inner(); + let id_to_tables = tables.into_iter().fold(HashMap::new(), |mut acc, table| { + acc.insert(table.id, table); + acc + }); + let static_filter_key_extractor_manager: Arc = + Arc::new(StaticFilterKeyExtractorManager::new(id_to_tables)); + let filter_key_extractor_manager = + FilterKeyExtractorManager::StaticFilterKeyExtractorManager( + static_filter_key_extractor_manager, + ); + + let mut output_object_ids_deque: VecDeque<_> = VecDeque::new(); + output_object_ids_deque.extend(output_object_ids); + let shared_compactor_object_id_manager = + SharedComapctorObjectIdManager::new(output_object_ids_deque, cloned_grpc_proxy_client.clone(), context.storage_opts.sstable_id_remote_fetch_number); + match dispatch_task.unwrap() { + dispatch_compaction_task_request::Task::CompactTask(compact_task) => { + context.running_task_count.fetch_add(1, Ordering::SeqCst); + let (tx, rx) = tokio::sync::oneshot::channel(); + let task_id = compact_task.task_id; + shutdown.lock().unwrap().insert(task_id, tx); + + let (compact_task, table_stats) = 
compactor_runner::compact( + context.clone(), + compact_task, + rx, + Box::new(shared_compactor_object_id_manager), + filter_key_extractor_manager.clone(), + ) + .await; + shutdown.lock().unwrap().remove(&task_id); + context.running_task_count.fetch_sub(1, Ordering::SeqCst); + let report_compaction_task_request = ReportCompactionTaskRequest { + event: Some(ReportCompactionTaskEvent::ReportTask(ReportSharedTask { + compact_task: Some(compact_task), + table_stats_change: to_prost_table_stats_map(table_stats), + })), + }; + + match cloned_grpc_proxy_client + .report_compaction_task(report_compaction_task_request) + .await + { + Ok(_) => {} + Err(e) => tracing::warn!("Failed to report task {task_id:?} . {e:?}"), + } + } + dispatch_compaction_task_request::Task::VacuumTask(vacuum_task) => { + match Vacuum::handle_vacuum_task( + context.sstable_store.clone(), + &vacuum_task.sstable_object_ids, + ) + .await + { + Ok(_) => { + let report_vacuum_task_request = ReportVacuumTaskRequest { + vacuum_task: Some(vacuum_task), + }; + match cloned_grpc_proxy_client.report_vacuum_task(report_vacuum_task_request).await { + Ok(_) => tracing::info!("Finished vacuuming SSTs"), + Err(e) => tracing::warn!("Failed to report vacuum task: {:#?}", e), + } + } + Err(e) => { + tracing::warn!("Failed to vacuum task: {:#?}", e) + } + } + } + dispatch_compaction_task_request::Task::FullScanTask(full_scan_task) => { + match Vacuum::handle_full_scan_task(full_scan_task, context.sstable_store.clone()) + .await + { + Ok((object_ids, total_object_count, total_object_size)) => { + let report_full_scan_task_request = ReportFullScanTaskRequest { + object_ids, + total_object_count, + total_object_size, + }; + match cloned_grpc_proxy_client + .report_full_scan_task(report_full_scan_task_request) + .await + { + Ok(_) => tracing::info!("Finished full scan SSTs"), + Err(e) => tracing::warn!("Failed to report full scan task: {:#?}", e), + } + } + Err(e) => { + tracing::warn!("Failed to iter object: {:#?}", e); + 
} + } + } + dispatch_compaction_task_request::Task::ValidationTask(validation_task) => { + validate_ssts(validation_task, context.sstable_store.clone()).await; + } + dispatch_compaction_task_request::Task::CancelCompactTask(cancel_compact_task) => { + if let Some(tx) = shutdown + .lock() + .unwrap() + .remove(&cancel_compact_task.task_id) + { + if tx.send(()).is_err() { + tracing::warn!( + "Cancellation of compaction task failed. task_id: {}", + cancel_compact_task.task_id + ); + } + } else { + tracing::warn!( + "Attempting to cancel non-existent compaction task. task_id: {}", + cancel_compact_task.task_id + ); + } + } + } + }); + } + None => continue 'consume_stream, + } + } + }); + (join_handle, shutdown_tx) +} + +fn get_task_progress( + task_progress: Arc< + parking_lot::lock_api::Mutex>>, + >, +) -> Vec { + let mut progress_list = Vec::new(); + for (&task_id, progress) in &*task_progress.lock() { + progress_list.push(CompactTaskProgress { + task_id, + num_ssts_sealed: progress.num_ssts_sealed.load(Ordering::Relaxed), + num_ssts_uploaded: progress.num_ssts_uploaded.load(Ordering::Relaxed), + num_progress_key: progress.num_progress_key.load(Ordering::Relaxed), + num_pending_read_io: progress.num_pending_read_io.load(Ordering::Relaxed) as u64, + num_pending_write_io: progress.num_pending_write_io.load(Ordering::Relaxed) as u64, + }); + } + progress_list +} diff --git a/src/storage/src/hummock/compactor/shared_buffer_compact.rs b/src/storage/src/hummock/compactor/shared_buffer_compact.rs index bcbaa19e2c3b1..428361237c0ac 100644 --- a/src/storage/src/hummock/compactor/shared_buffer_compact.rs +++ b/src/storage/src/hummock/compactor/shared_buffer_compact.rs @@ -30,7 +30,7 @@ use risingwave_hummock_sdk::{CompactionGroupId, HummockEpoch, LocalSstableInfo}; use risingwave_pb::hummock::compact_task; use tracing::error; -use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::filter_key_extractor::{FilterKeyExtractorImpl, FilterKeyExtractorManager}; use 
crate::hummock::compactor::compaction_filter::DummyCompactionFilter; use crate::hummock::compactor::context::CompactorContext; use crate::hummock::compactor::{CompactOutput, Compactor}; @@ -41,7 +41,6 @@ use crate::hummock::shared_buffer::shared_buffer_batch::{ SharedBufferBatch, SharedBufferBatchInner, SharedBufferVersionedEntry, }; use crate::hummock::sstable::CompactionDeleteRangesBuilder; -use crate::hummock::store::memtable::ImmutableMemtable; use crate::hummock::utils::MemoryTracker; use crate::hummock::value::HummockValue; use crate::hummock::{ @@ -49,6 +48,7 @@ use crate::hummock::{ CompactionDeleteRanges, GetObjectId, HummockError, HummockResult, SstableBuilderOptions, SstableObjectIdManagerRef, }; +use crate::mem_table::ImmutableMemtable; const GC_DELETE_KEYS_FOR_FLUSH: bool = false; const GC_WATERMARK_FOR_FLUSH: u64 = 0; @@ -59,6 +59,7 @@ pub async fn compact( sstable_object_id_manager: SstableObjectIdManagerRef, payload: UploadTaskPayload, compaction_group_index: Arc>, + filter_key_extractor_manager: FilterKeyExtractorManager, ) -> HummockResult> { let mut grouped_payload: HashMap = HashMap::new(); for imm in payload { @@ -86,6 +87,7 @@ pub async fn compact( compact_shared_buffer( context.clone(), sstable_object_id_manager.clone(), + filter_key_extractor_manager.clone(), group_payload, ) .map_ok(move |results| { @@ -112,6 +114,7 @@ pub async fn compact( async fn compact_shared_buffer( context: CompactorContext, sstable_object_id_manager: SstableObjectIdManagerRef, + filter_key_extractor_manager: FilterKeyExtractorManager, mut payload: UploadTaskPayload, ) -> HummockResult> { // Local memory compaction looks at all key ranges. 
@@ -124,8 +127,7 @@ async fn compact_shared_buffer( assert!(!existing_table_ids.is_empty()); - let multi_filter_key_extractor = context - .filter_key_extractor_manager + let multi_filter_key_extractor = filter_key_extractor_manager .acquire(existing_table_ids.clone()) .await?; if let FilterKeyExtractorImpl::Multi(multi) = &multi_filter_key_extractor { diff --git a/src/storage/src/hummock/error.rs b/src/storage/src/hummock/error.rs index efd25c8076383..3243beb0cdcda 100644 --- a/src/storage/src/hummock/error.rs +++ b/src/storage/src/hummock/error.rs @@ -38,8 +38,6 @@ enum HummockErrorInner { ObjectIoError(Box), #[error("Meta error {0}.")] MetaError(String), - #[error("Invalid WriteBatch.")] - InvalidWriteBatch, #[error("SharedBuffer error {0}.")] SharedBufferError(String), #[error("Wait epoch error {0}.")] @@ -105,10 +103,6 @@ impl HummockError { HummockErrorInner::MetaError(error.to_string()).into() } - pub fn invalid_write_batch() -> HummockError { - HummockErrorInner::InvalidWriteBatch.into() - } - pub fn shared_buffer_error(error: impl ToString) -> HummockError { HummockErrorInner::SharedBufferError(error.to_string()).into() } diff --git a/src/storage/src/hummock/event_handler/hummock_event_handler.rs b/src/storage/src/hummock/event_handler/hummock_event_handler.rs index d9e25ebe46555..c55b73e6af6b0 100644 --- a/src/storage/src/hummock/event_handler/hummock_event_handler.rs +++ b/src/storage/src/hummock/event_handler/hummock_event_handler.rs @@ -30,6 +30,7 @@ use tracing::{error, info, trace, warn}; use super::refiller::{CacheRefillConfig, CacheRefiller}; use super::{LocalInstanceGuard, LocalInstanceId, ReadVersionMappingType}; +use crate::filter_key_extractor::FilterKeyExtractorManager; use crate::hummock::compactor::{compact, CompactorContext}; use crate::hummock::conflict_detector::ConflictDetector; use crate::hummock::event_handler::refiller::CacheRefillerEvent; @@ -133,6 +134,7 @@ async fn flush_imms( payload: UploadTaskPayload, task_info: UploadTaskInfo, 
compactor_context: CompactorContext, + filter_key_extractor_manager: FilterKeyExtractorManager, sstable_object_id_manager: Arc, ) -> HummockResult> { for epoch in &task_info.epochs { @@ -148,6 +150,7 @@ async fn flush_imms( sstable_object_id_manager, payload, task_info.compaction_group_index, + filter_key_extractor_manager, ) .verbose_instrument_await("shared_buffer_compact") .await @@ -159,6 +162,7 @@ impl HummockEventHandler { hummock_event_rx: mpsc::UnboundedReceiver, pinned_version: PinnedVersion, compactor_context: CompactorContext, + filter_key_extractor_manager: FilterKeyExtractorManager, sstable_object_id_manager: Arc, state_store_metrics: Arc, cache_refill_config: CacheRefillConfig, @@ -184,6 +188,7 @@ impl HummockEventHandler { payload, task_info, upload_compactor_context.clone(), + filter_key_extractor_manager.clone(), cloned_sstable_object_id_manager.clone(), )) }), diff --git a/src/storage/src/hummock/event_handler/mod.rs b/src/storage/src/hummock/event_handler/mod.rs index 7b9bfd09835cd..4a80ff1a51033 100644 --- a/src/storage/src/hummock/event_handler/mod.rs +++ b/src/storage/src/hummock/event_handler/mod.rs @@ -22,8 +22,8 @@ use risingwave_pb::hummock::version_update_payload; use tokio::sync::{mpsc, oneshot}; use crate::hummock::shared_buffer::shared_buffer_batch::SharedBufferBatch; -use crate::hummock::store::memtable::ImmutableMemtable; use crate::hummock::HummockResult; +use crate::mem_table::ImmutableMemtable; use crate::store::SyncResult; pub mod hummock_event_handler; diff --git a/src/storage/src/hummock/event_handler/refiller.rs b/src/storage/src/hummock/event_handler/refiller.rs index 7e14f92bd76a4..e9eabb952d2d2 100644 --- a/src/storage/src/hummock/event_handler/refiller.rs +++ b/src/storage/src/hummock/event_handler/refiller.rs @@ -19,69 +19,101 @@ use std::sync::{Arc, LazyLock}; use std::task::{ready, Context, Poll}; use std::time::{Duration, Instant}; +use foyer::common::code::Key; use futures::future::{join_all, try_join_all}; use 
futures::{Future, FutureExt}; use itertools::Itertools; -use prometheus::core::{AtomicU64, GenericCounter}; +use prometheus::core::{AtomicU64, GenericCounter, GenericCounterVec}; use prometheus::{ - register_histogram_vec_with_registry, register_histogram_with_registry, - register_int_counter_with_registry, register_int_gauge_with_registry, Histogram, HistogramVec, - IntGauge, Registry, + register_histogram_vec_with_registry, register_int_counter_vec_with_registry, + register_int_gauge_with_registry, Histogram, HistogramVec, IntGauge, Registry, }; use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; +use risingwave_common::util::iter_util::ZipEqFast; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::SstDeltaInfo; use tokio::sync::Semaphore; use tokio::task::JoinHandle; +use crate::hummock::file_cache::preclude::*; use crate::hummock::local_version::pinned_version::PinnedVersion; -use crate::hummock::{HummockResult, SstableStoreRef, TableHolder}; +use crate::hummock::{ + Block, HummockError, HummockResult, Sstable, SstableBlockIndex, SstableStoreRef, TableHolder, +}; use crate::monitor::StoreLocalStatistic; pub static GLOBAL_CACHE_REFILL_METRICS: LazyLock = LazyLock::new(|| CacheRefillMetrics::new(&GLOBAL_METRICS_REGISTRY)); pub struct CacheRefillMetrics { - pub data_refill_duration: HistogramVec, + pub refill_duration: HistogramVec, + pub refill_total: GenericCounterVec, + pub refill_bytes: GenericCounterVec, + + pub data_refill_success_duration: Histogram, + pub meta_refill_success_duration: Histogram, - pub data_refill_duration_admitted: Histogram, - pub data_refill_duration_rejected: Histogram, pub data_refill_filtered_total: GenericCounter, + pub data_refill_attempts_total: GenericCounter, + pub data_refill_started_total: GenericCounter, + pub meta_refill_attempts_total: GenericCounter, - pub meta_refill_duration: Histogram, + pub data_refill_ideal_bytes: GenericCounter, + pub data_refill_success_bytes: GenericCounter, pub 
refill_queue_total: IntGauge, } impl CacheRefillMetrics { pub fn new(registry: &Registry) -> Self { - let data_refill_duration = register_histogram_vec_with_registry!( - "data_refill_duration", - "data refill duration", - &["op"], + let refill_duration = register_histogram_vec_with_registry!( + "refill_duration", + "refill duration", + &["type", "op"], registry, ) .unwrap(); - let data_refill_duration_admitted = data_refill_duration - .get_metric_with_label_values(&["admitted"]) - .unwrap(); - let data_refill_duration_rejected = data_refill_duration - .get_metric_with_label_values(&["rejected"]) - .unwrap(); - - let data_refill_filtered_total = register_int_counter_with_registry!( - "data_refill_filtered_total", - "data refill filtered total", + let refill_total = register_int_counter_vec_with_registry!( + "refill_total", + "refill total", + &["type", "op"], registry, ) .unwrap(); - - let meta_refill_duration = register_histogram_with_registry!( - "meta_refill_duration", - "meta refill duration", + let refill_bytes = register_int_counter_vec_with_registry!( + "refill_bytes", + "refill bytes", + &["type", "op"], registry, ) .unwrap(); + let data_refill_success_duration = refill_duration + .get_metric_with_label_values(&["data", "success"]) + .unwrap(); + let meta_refill_success_duration = refill_duration + .get_metric_with_label_values(&["meta", "success"]) + .unwrap(); + + let data_refill_filtered_total = refill_total + .get_metric_with_label_values(&["data", "filtered"]) + .unwrap(); + let data_refill_attempts_total = refill_total + .get_metric_with_label_values(&["data", "attempts"]) + .unwrap(); + let data_refill_started_total = refill_total + .get_metric_with_label_values(&["data", "started"]) + .unwrap(); + let meta_refill_attempts_total = refill_total + .get_metric_with_label_values(&["meta", "attempts"]) + .unwrap(); + + let data_refill_ideal_bytes = refill_bytes + .get_metric_with_label_values(&["data", "ideal"]) + .unwrap(); + let data_refill_success_bytes 
= refill_bytes + .get_metric_with_label_values(&["data", "success"]) + .unwrap(); + let refill_queue_total = register_int_gauge_with_registry!( "refill_queue_total", "refill queue total", @@ -90,11 +122,20 @@ impl CacheRefillMetrics { .unwrap(); Self { - data_refill_duration, - data_refill_duration_admitted, - data_refill_duration_rejected, + refill_duration, + refill_total, + refill_bytes, + + data_refill_success_duration, + meta_refill_success_duration, data_refill_filtered_total, - meta_refill_duration, + data_refill_attempts_total, + data_refill_started_total, + meta_refill_attempts_total, + + data_refill_ideal_bytes, + data_refill_success_bytes, + refill_queue_total, } } @@ -102,9 +143,22 @@ impl CacheRefillMetrics { #[derive(Debug)] pub struct CacheRefillConfig { + /// Cache refill timeout. pub timeout: Duration, + + /// Data file cache refill levels. pub data_refill_levels: HashSet, + + /// Data file cache refill concurrency. pub concurrency: usize, + + /// Data file cache refill unit (blocks). + pub unit: usize, + + /// Data file cache reill unit threshold. + /// + /// Only units whose admit rate > threshold will be refilled. 
+ pub threshold: f64, } struct Item { @@ -228,19 +282,22 @@ impl CacheRefillTask { context: &CacheRefillContext, delta: &SstDeltaInfo, ) -> HummockResult> { - let stats = StoreLocalStatistic::default(); let tasks = delta .insert_sst_infos .iter() .map(|info| async { - let _timer = GLOBAL_CACHE_REFILL_METRICS - .meta_refill_duration - .start_timer(); - context.sstable_store.sstable_syncable(info, &stats).await + let mut stats = StoreLocalStatistic::default(); + GLOBAL_CACHE_REFILL_METRICS.meta_refill_attempts_total.inc(); + + let now = Instant::now(); + let res = context.sstable_store.sstable(info, &mut stats).await; + GLOBAL_CACHE_REFILL_METRICS + .meta_refill_success_duration + .observe(now.elapsed().as_secs_f64()); + res }) .collect_vec(); - let res = try_join_all(tasks).await?; - let holders = res.into_iter().map(|(holder, _, _)| holder).collect_vec(); + let holders = try_join_all(tasks).await?; Ok(holders) } @@ -249,10 +306,8 @@ impl CacheRefillTask { delta: &SstDeltaInfo, holders: Vec, ) { - let now = Instant::now(); - // return if data file cache is disabled - let Some(filter) = context.sstable_store.data_file_cache_refill_filter() else { + let Some(filter) = context.sstable_store.data_recent_filter() else { return; }; @@ -271,44 +326,133 @@ impl CacheRefillTask { .iter() .any(|id| filter.contains(id)) { - let blocks = holders - .iter() - .map(|meta| meta.value().block_count() as u64) - .sum(); - GLOBAL_CACHE_REFILL_METRICS - .data_refill_filtered_total - .inc_by(blocks); + GLOBAL_CACHE_REFILL_METRICS.data_refill_filtered_total.inc(); return; } let mut tasks = vec![]; for sst_info in &holders { - for block_index in 0..sst_info.value().block_count() { - let meta = sst_info.value(); - let mut stat = StoreLocalStatistic::default(); + let task = async move { + if let Err(e) = Self::data_file_cache_refill_impl(context, sst_info.value()).await { + tracing::warn!("data cache refill error: {:?}", e); + } + }; + tasks.push(task); + } + + join_all(tasks).await; + } + + 
async fn data_file_cache_refill_impl( + context: &CacheRefillContext, + sst: &Sstable, + ) -> HummockResult<()> { + let sstable_store = &context.sstable_store; + let object_id = sst.id; + let unit = context.config.unit; + let threshold = context.config.threshold; + + if let Some(filter) = sstable_store.data_recent_filter() { + filter.insert(object_id); + } + + let mut tasks = vec![]; + + // unit-level refill: + // + // Although file cache receivces item by block, a larger range of data is still recommended to reduce + // S3 iops and per request base latency waste. + // + // To decide which unit to refill, we calculate the ratio that the block of a unit will be received by + // file cache. If the ratio is higher than a threshold, we fetich and refill the whole unit by block. + + for block_index_start in (0..sst.block_count()).step_by(unit) { + let block_index_end = std::cmp::min(block_index_start + unit, sst.block_count()); + + let (range_first, _) = sst.calculate_block_info(block_index_start); + let (range_last, _) = sst.calculate_block_info(block_index_end - 1); + let range = range_first.start..range_last.end; + + GLOBAL_CACHE_REFILL_METRICS + .data_refill_ideal_bytes + .inc_by((range.end - range.start) as u64); + + let mut writers = Vec::with_capacity(block_index_end - block_index_start); + let mut ranges = Vec::with_capacity(block_index_end - block_index_start); + let mut admits = 0; + + for block_index in block_index_start..block_index_end { + let (range, uncompressed_capacity) = sst.calculate_block_info(block_index); + let key = SstableBlockIndex { + sst_id: object_id, + block_idx: block_index as u64, + }; + let mut writer = sstable_store + .data_file_cache() + .writer(key, key.serialized_len() + uncompressed_capacity); + + if writer.judge() { + admits += 1; + } + + writers.push(writer); + ranges.push(range); + } + + if admits as f64 / writers.len() as f64 >= threshold { let task = async move { + GLOBAL_CACHE_REFILL_METRICS.data_refill_attempts_total.inc(); + 
let permit = context.concurrency.acquire().await.unwrap(); - match context - .sstable_store - .may_fill_data_file_cache(meta, block_index, &mut stat) - .await - { - Ok(true) => GLOBAL_CACHE_REFILL_METRICS - .data_refill_duration_admitted - .observe(now.elapsed().as_secs_f64()), - Ok(false) => GLOBAL_CACHE_REFILL_METRICS - .data_refill_duration_rejected - .observe(now.elapsed().as_secs_f64()), - Err(e) => { - tracing::warn!("data cache refill error: {:?}", e); - } + + GLOBAL_CACHE_REFILL_METRICS.data_refill_started_total.inc(); + + let timer = GLOBAL_CACHE_REFILL_METRICS + .data_refill_success_duration + .start_timer(); + + let data = sstable_store + .store() + .read(&sstable_store.get_sst_data_path(object_id), range.clone()) + .await?; + let mut futures = vec![]; + for (mut writer, r) in writers.into_iter().zip_eq_fast(ranges) { + let offset = r.start - range.start; + let len = r.end - r.start; + let bytes = data.slice(offset..offset + len); + + let future = async move { + let block = Block::decode( + bytes, + writer.weight() - writer.key().serialized_len(), + )?; + let block = Box::new(block); + writer.force(); + let res = writer.finish(block).await.map_err(HummockError::file_cache); + if matches!(res, Ok(true)) { + GLOBAL_CACHE_REFILL_METRICS + .data_refill_success_bytes + .inc_by(len as u64); + } + res + }; + futures.push(future); } + try_join_all(futures) + .await + .map_err(HummockError::file_cache)?; + drop(permit); + drop(timer); + + Ok::<_, HummockError>(()) }; tasks.push(task); } } - join_all(tasks).await; + try_join_all(tasks).await?; + + Ok(()) } } diff --git a/src/storage/src/hummock/event_handler/uploader.rs b/src/storage/src/hummock/event_handler/uploader.rs index f57ac33bfe6a2..a07da55fb7046 100644 --- a/src/storage/src/hummock/event_handler/uploader.rs +++ b/src/storage/src/hummock/event_handler/uploader.rs @@ -36,10 +36,10 @@ use crate::hummock::compactor::{merge_imms_in_memory, CompactionExecutor}; use 
crate::hummock::event_handler::hummock_event_handler::BufferTracker; use crate::hummock::event_handler::LocalInstanceId; use crate::hummock::local_version::pinned_version::PinnedVersion; -use crate::hummock::store::memtable::{ImmId, ImmutableMemtable}; use crate::hummock::store::version::StagingSstableInfo; use crate::hummock::utils::MemoryTracker; -use crate::hummock::{HummockError, HummockResult}; +use crate::hummock::{HummockError, HummockResult, ImmutableMemtable}; +use crate::mem_table::ImmId; use crate::monitor::HummockStateStoreMetrics; use crate::opts::StorageOpts; @@ -1044,9 +1044,9 @@ mod tests { }; use crate::hummock::local_version::pinned_version::PinnedVersion; use crate::hummock::shared_buffer::shared_buffer_batch::SharedBufferBatch; - use crate::hummock::store::memtable::{ImmId, ImmutableMemtable}; use crate::hummock::value::HummockValue; use crate::hummock::{HummockError, HummockResult, MemoryLimiter}; + use crate::mem_table::{ImmId, ImmutableMemtable}; use crate::monitor::HummockStateStoreMetrics; use crate::opts::StorageOpts; use crate::storage_value::StorageValue; @@ -1081,7 +1081,7 @@ mod tests { limiter: Option<&MemoryLimiter>, ) -> ImmutableMemtable { let sorted_items = SharedBufferBatch::build_shared_buffer_item_batches(vec![( - Bytes::from(dummy_table_key()), + TableKey(Bytes::from(dummy_table_key())), StorageValue::new_delete(), )]); let size = SharedBufferBatch::measure_batch_size(&sorted_items); @@ -1641,9 +1641,6 @@ mod tests { (buffer_tracker, uploader, new_task_notifier) } - // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. - // TODO: remove `allow` here after the issued is closed. 
- #[expect(clippy::needless_pass_by_ref_mut)] async fn assert_uploader_pending(uploader: &mut HummockUploader) { for _ in 0..10 { yield_now().await; diff --git a/src/storage/src/hummock/file_cache/store.rs b/src/storage/src/hummock/file_cache/store.rs index da47c1562eaee..222700c8376b2 100644 --- a/src/storage/src/hummock/file_cache/store.rs +++ b/src/storage/src/hummock/file_cache/store.rs @@ -20,29 +20,25 @@ use std::time::Duration; use bytes::{Buf, BufMut, Bytes}; use foyer::common::code::{Key, Value}; -use foyer::storage::admission::rated_random::RatedRandomAdmissionPolicy; +use foyer::intrusive::eviction::lfu::LfuConfig; +use foyer::storage::admission::rated_ticket::RatedTicketAdmissionPolicy; use foyer::storage::admission::AdmissionPolicy; -use foyer::storage::event::EventListener; +use foyer::storage::device::fs::FsDeviceConfig; pub use foyer::storage::metrics::set_metrics_registry as set_foyer_metrics_registry; -use foyer::storage::store::FetchValueFuture; -use foyer::storage::LfuFsStoreConfig; -use risingwave_common::util::runtime::BackgroundShutdownRuntime; +use foyer::storage::reinsertion::ReinsertionPolicy; +use foyer::storage::runtime::{ + RuntimeConfig, RuntimeLazyStore, RuntimeLazyStoreConfig, RuntimeLazyStoreWriter, +}; +use foyer::storage::storage::{Storage, StorageWriter}; +use foyer::storage::store::{LfuFsStoreConfig, NoneStore, NoneStoreWriter}; use risingwave_hummock_sdk::HummockSstableObjectId; use crate::hummock::{Block, Sstable, SstableMeta}; -#[derive(thiserror::Error, Debug)] -pub enum FileCacheError { - #[error("foyer error: {0}")] - Foyer(#[from] foyer::storage::error::Error), - #[error("other {0}")] - Other(#[from] Box), -} - -impl FileCacheError { - fn foyer(e: foyer::storage::error::Error) -> Self { - Self::Foyer(e) - } +pub mod preclude { + pub use foyer::storage::storage::{ + AsyncStorageExt, ForceStorageExt, Storage, StorageExt, StorageWriter, + }; } pub type Result = core::result::Result; @@ -50,11 +46,11 @@ pub type Result = 
core::result::Result; pub type EvictionConfig = foyer::intrusive::eviction::lfu::LfuConfig; pub type DeviceConfig = foyer::storage::device::fs::FsDeviceConfig; -pub type FoyerStore = foyer::storage::LfuFsStore; -pub type FoyerStoreResult = foyer::storage::error::Result; -pub type FoyerStoreError = foyer::storage::error::Error; +pub type FileCacheResult = foyer::storage::error::Result; +pub type FileCacheError = foyer::storage::error::Error; -pub struct FoyerStoreConfig +#[derive(Debug)] +pub struct FileCacheConfig where K: Key, V: Value, @@ -73,316 +69,321 @@ where pub recover_concurrency: usize, pub lfu_window_to_cache_size_ratio: usize, pub lfu_tiny_lru_capacity_ratio: f64, - pub rated_random_rate: usize, - pub event_listener: Vec>>, - pub enable_filter: bool, + pub insert_rate_limit: usize, + pub allocator_bits: usize, + pub allocation_timeout: Duration, + pub admissions: Vec>>, + pub reinsertions: Vec>>, } -pub struct FoyerRuntimeConfig +impl Clone for FileCacheConfig where K: Key, V: Value, { - pub foyer_store_config: FoyerStoreConfig, - pub runtime_worker_threads: Option, + fn clone(&self) -> Self { + Self { + name: self.name.clone(), + dir: self.dir.clone(), + capacity: self.capacity, + file_capacity: self.file_capacity, + buffer_pool_size: self.buffer_pool_size, + device_align: self.device_align, + device_io_size: self.device_io_size, + flushers: self.flushers, + flush_rate_limit: self.flush_rate_limit, + reclaimers: self.reclaimers, + reclaim_rate_limit: self.reclaim_rate_limit, + recover_concurrency: self.recover_concurrency, + lfu_window_to_cache_size_ratio: self.lfu_window_to_cache_size_ratio, + lfu_tiny_lru_capacity_ratio: self.lfu_tiny_lru_capacity_ratio, + insert_rate_limit: self.insert_rate_limit, + allocator_bits: self.allocator_bits, + allocation_timeout: self.allocation_timeout, + admissions: self.admissions.clone(), + reinsertions: self.reinsertions.clone(), + } + } } -#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] -pub 
struct SstableBlockIndex { - pub sst_id: HummockSstableObjectId, - pub block_idx: u64, +#[derive(Debug)] +pub enum FileCacheWriter +where + K: Key, + V: Value, +{ + Foyer { + writer: RuntimeLazyStoreWriter, + }, + None { + writer: NoneStoreWriter, + }, } -impl Key for SstableBlockIndex { - fn serialized_len(&self) -> usize { - 8 + 8 // sst_id (8B) + block_idx (8B) - } - - fn write(&self, mut buf: &mut [u8]) { - buf.put_u64(self.sst_id); - buf.put_u64(self.block_idx); +impl From> for FileCacheWriter +where + K: Key, + V: Value, +{ + fn from(writer: RuntimeLazyStoreWriter) -> Self { + Self::Foyer { writer } } +} - fn read(mut buf: &[u8]) -> Self { - let sst_id = buf.get_u64(); - let block_idx = buf.get_u64(); - Self { sst_id, block_idx } +impl From> for FileCacheWriter +where + K: Key, + V: Value, +{ + fn from(writer: NoneStoreWriter) -> Self { + Self::None { writer } } } -impl Value for Box { - fn serialized_len(&self) -> usize { - self.raw_data().len() - } +impl StorageWriter for FileCacheWriter +where + K: Key, + V: Value, +{ + type Key = K; + type Value = V; - fn write(&self, mut buf: &mut [u8]) { - buf.put_slice(self.raw_data()) + fn key(&self) -> &Self::Key { + match self { + FileCacheWriter::Foyer { writer } => writer.key(), + FileCacheWriter::None { writer } => writer.key(), + } } - fn read(buf: &[u8]) -> Self { - let data = Bytes::copy_from_slice(buf); - let block = Block::decode_from_raw(data); - Box::new(block) + fn weight(&self) -> usize { + match self { + FileCacheWriter::Foyer { writer } => writer.weight(), + FileCacheWriter::None { writer } => writer.weight(), + } } -} -impl Value for Box { - fn serialized_len(&self) -> usize { - 8 + self.meta.encoded_size() // id (8B) + meta size + fn judge(&mut self) -> bool { + match self { + FileCacheWriter::Foyer { writer } => writer.judge(), + FileCacheWriter::None { writer } => writer.judge(), + } } - fn write(&self, mut buf: &mut [u8]) { - buf.put_u64(self.id); - // TODO(MrCroxx): avoid buffer copy - let mut 
buffer = vec![]; - self.meta.encode_to(&mut buffer); - buf.put_slice(&buffer[..]) + fn force(&mut self) { + match self { + FileCacheWriter::Foyer { writer } => writer.force(), + FileCacheWriter::None { writer } => writer.force(), + } } - fn read(mut buf: &[u8]) -> Self { - let id = buf.get_u64(); - let meta = SstableMeta::decode(buf).unwrap(); - Box::new(Sstable::new(id, meta)) + async fn finish(self, value: Self::Value) -> FileCacheResult { + match self { + FileCacheWriter::Foyer { writer } => writer.finish(value).await, + FileCacheWriter::None { writer } => writer.finish(value).await, + } } } -#[derive(Clone)] +#[derive(Debug)] pub enum FileCache where - K: Key + Copy, + K: Key, V: Value, { - None, - FoyerRuntime { - runtime: Arc, - store: Arc>, - enable_filter: bool, - }, + Foyer { store: RuntimeLazyStore }, + None { store: NoneStore }, +} + +impl Clone for FileCache +where + K: Key, + V: Value, +{ + fn clone(&self) -> Self { + match self { + Self::Foyer { store } => Self::Foyer { + store: store.clone(), + }, + Self::None { store } => Self::None { + store: store.clone(), + }, + } + } } impl FileCache where - K: Key + Copy, + K: Key, V: Value, { pub fn none() -> Self { - Self::None + Self::None { + store: NoneStore::default(), + } } +} - pub async fn foyer(config: FoyerRuntimeConfig) -> Result { - let mut builder = tokio::runtime::Builder::new_multi_thread(); - if let Some(runtime_worker_threads) = config.runtime_worker_threads { - builder.worker_threads(runtime_worker_threads); +impl Storage for FileCache +where + K: Key, + V: Value, +{ + type Config = FileCacheConfig; + type Key = K; + type Value = V; + type Writer = FileCacheWriter; + + async fn open(config: Self::Config) -> FileCacheResult { + let mut admissions = config.admissions; + if config.insert_rate_limit > 0 { + admissions.push(Arc::new(RatedTicketAdmissionPolicy::new( + config.insert_rate_limit, + ))); } - let runtime = builder - .thread_name("risingwave-foyer-storage") - .enable_all() - .build() - 
.map_err(|e| FileCacheError::Other(e.into()))?; - - let enable_filter = config.foyer_store_config.enable_filter; - - let store = runtime - .spawn(async move { - let foyer_store_config = config.foyer_store_config; - - let file_capacity = foyer_store_config.file_capacity; - let capacity = foyer_store_config.capacity; - let capacity = capacity - (capacity % file_capacity); - - let mut admissions: Vec>> = vec![]; - if foyer_store_config.rated_random_rate > 0 { - let rr = RatedRandomAdmissionPolicy::new( - foyer_store_config.rated_random_rate, - Duration::from_millis(100), - ); - admissions.push(Arc::new(rr)); - } - - let c = LfuFsStoreConfig { - name: foyer_store_config.name, - eviction_config: EvictionConfig { - window_to_cache_size_ratio: foyer_store_config - .lfu_window_to_cache_size_ratio, - tiny_lru_capacity_ratio: foyer_store_config.lfu_tiny_lru_capacity_ratio, - }, - device_config: DeviceConfig { - dir: foyer_store_config.dir.clone(), - capacity, - file_capacity, - align: foyer_store_config.device_align, - io_size: foyer_store_config.device_io_size, - }, - admissions, - reinsertions: vec![], - buffer_pool_size: foyer_store_config.buffer_pool_size, - flushers: foyer_store_config.flushers, - flush_rate_limit: foyer_store_config.flush_rate_limit, - reclaimers: foyer_store_config.reclaimers, - reclaim_rate_limit: foyer_store_config.reclaim_rate_limit, - recover_concurrency: foyer_store_config.recover_concurrency, - event_listeners: foyer_store_config.event_listener, - clean_region_threshold: foyer_store_config.reclaimers - + foyer_store_config.reclaimers / 2, - }; - - FoyerStore::open(c).await.map_err(FileCacheError::foyer) - }) - .await - .unwrap()?; - - Ok(Self::FoyerRuntime { - runtime: Arc::new(runtime.into()), - store, - enable_filter, - }) - } - #[tracing::instrument(skip(self, value))] - pub async fn insert(&self, key: K, value: V) -> Result { - match self { - FileCache::None => Ok(false), - FileCache::FoyerRuntime { runtime, store, .. 
} => { - let store = store.clone(); - runtime - .spawn(async move { store.insert_if_not_exists(key, value).await }) - .await - .unwrap() - .map_err(FileCacheError::foyer) + let c = RuntimeLazyStoreConfig { + store: LfuFsStoreConfig { + name: config.name.clone(), + eviction_config: LfuConfig { + window_to_cache_size_ratio: config.lfu_window_to_cache_size_ratio, + tiny_lru_capacity_ratio: config.lfu_tiny_lru_capacity_ratio, + }, + device_config: FsDeviceConfig { + dir: config.dir, + capacity: config.capacity, + file_capacity: config.file_capacity, + align: config.device_align, + io_size: config.device_io_size, + }, + allocator_bits: config.allocator_bits, + catalog_bits: 6, + admissions, + reinsertions: config.reinsertions, + buffer_pool_size: config.buffer_pool_size, + flushers: config.flushers, + flush_rate_limit: config.flush_rate_limit, + reclaimers: config.reclaimers, + reclaim_rate_limit: config.reclaim_rate_limit, + allocation_timeout: config.allocation_timeout, + clean_region_threshold: config.reclaimers + config.reclaimers / 2, + recover_concurrency: config.recover_concurrency, } - } + .into(), + runtime: RuntimeConfig { + worker_threads: None, + thread_name: Some(config.name), + }, + }; + let store = RuntimeLazyStore::open(c).await?; + Ok(Self::Foyer { store }) } - #[tracing::instrument(skip(self))] - pub fn insert_without_wait(&self, key: K, value: V) { + fn is_ready(&self) -> bool { match self { - FileCache::None => {} - FileCache::FoyerRuntime { runtime, store, .. 
} => { - let store = store.clone(); - runtime.spawn(async move { store.insert_if_not_exists(key, value).await }); - } + FileCache::Foyer { store } => store.is_ready(), + FileCache::None { store } => store.is_ready(), } } - /// only fetch value if judge pass - #[tracing::instrument(skip(self, fetch_value))] - pub async fn insert_with( - &self, - key: K, - fetch_value: F, - value_serialized_len: usize, - ) -> Result - where - F: FnOnce() -> FU, - FU: FetchValueFuture, - { + async fn close(&self) -> FileCacheResult<()> { match self { - FileCache::None => Ok(false), - FileCache::FoyerRuntime { runtime, store, .. } => { - let store = store.clone(); - let future = fetch_value(); - runtime - .spawn(async move { - store - .insert_if_not_exists_with_future( - key, - || future, - key.serialized_len() + value_serialized_len, - ) - .await - }) - .await - .unwrap() - .map_err(FileCacheError::foyer) - } + FileCache::Foyer { store } => store.close().await, + FileCache::None { store } => store.close().await, } } - #[tracing::instrument(skip(self))] - pub async fn remove(&self, key: &K) -> Result { + fn writer(&self, key: Self::Key, weight: usize) -> Self::Writer { match self { - FileCache::None => Ok(false), - FileCache::FoyerRuntime { runtime, store, .. } => { - let store = store.clone(); - let key = *key; - runtime - .spawn(async move { store.remove(&key).await }) - .await - .unwrap() - .map_err(FileCacheError::foyer) - } + FileCache::Foyer { store } => store.writer(key, weight).into(), + FileCache::None { store } => store.writer(key, weight).into(), } } - #[tracing::instrument(skip(self))] - pub fn remove_without_wait(&self, key: &K) { + fn exists(&self, key: &Self::Key) -> FileCacheResult { match self { - FileCache::None => {} - FileCache::FoyerRuntime { runtime, store, .. 
} => { - let store = store.clone(); - let key = *key; - runtime.spawn(async move { store.remove(&key).await }); - } + FileCache::Foyer { store } => store.exists(key), + FileCache::None { store } => store.exists(key), } } - #[tracing::instrument(skip(self))] - pub async fn clear(&self) -> Result<()> { + async fn lookup(&self, key: &Self::Key) -> FileCacheResult> { match self { - FileCache::None => Ok(()), - FileCache::FoyerRuntime { runtime, store, .. } => { - let store = store.clone(); - runtime - .spawn(async move { store.clear().await }) - .await - .unwrap() - .map_err(FileCacheError::foyer) - } + FileCache::Foyer { store } => store.lookup(key).await, + FileCache::None { store } => store.lookup(key).await, } } - #[tracing::instrument(skip(self))] - pub fn clear_without_wait(&self) { + fn remove(&self, key: &Self::Key) -> FileCacheResult { match self { - FileCache::None => {} - FileCache::FoyerRuntime { runtime, store, .. } => { - let store = store.clone(); - runtime.spawn(async move { store.clear().await }); - } + FileCache::Foyer { store } => store.remove(key), + FileCache::None { store } => store.remove(key), } } - #[tracing::instrument(skip(self))] - pub async fn lookup(&self, key: &K) -> Result> { + fn clear(&self) -> FileCacheResult<()> { match self { - FileCache::None => Ok(None), - FileCache::FoyerRuntime { runtime, store, .. } => { - let store = store.clone(); - let key = *key; - runtime - .spawn(async move { store.lookup(&key).await }) - .await - .unwrap() - .map_err(FileCacheError::foyer) - } + FileCache::Foyer { store } => store.clear(), + FileCache::None { store } => store.clear(), } } +} - #[tracing::instrument(skip(self))] - pub async fn exists(&self, key: &K) -> Result { - match self { - FileCache::None => Ok(false), - FileCache::FoyerRuntime { store, .. 
} => { - store.exists(key).map_err(FileCacheError::foyer) - } - } +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] +pub struct SstableBlockIndex { + pub sst_id: HummockSstableObjectId, + pub block_idx: u64, +} + +impl Key for SstableBlockIndex { + fn serialized_len(&self) -> usize { + 8 + 8 // sst_id (8B) + block_idx (8B) } - pub fn is_filter_enabled(&self) -> bool { - match self { - FileCache::None => false, - FileCache::FoyerRuntime { enable_filter, .. } => *enable_filter, - } + fn write(&self, mut buf: &mut [u8]) { + buf.put_u64(self.sst_id); + buf.put_u64(self.block_idx); + } + + fn read(mut buf: &[u8]) -> Self { + let sst_id = buf.get_u64(); + let block_idx = buf.get_u64(); + Self { sst_id, block_idx } + } +} + +impl Value for Box { + fn serialized_len(&self) -> usize { + self.raw_data().len() + } + + fn write(&self, mut buf: &mut [u8]) { + buf.put_slice(self.raw_data()) + } + + fn read(buf: &[u8]) -> Self { + let data = Bytes::copy_from_slice(buf); + let block = Block::decode_from_raw(data); + Box::new(block) + } +} + +impl Value for Box { + fn serialized_len(&self) -> usize { + 8 + self.meta.encoded_size() // id (8B) + meta size + } + + fn write(&self, mut buf: &mut [u8]) { + buf.put_u64(self.id); + // TODO(MrCroxx): avoid buffer copy + let mut buffer = vec![]; + self.meta.encode_to(&mut buffer); + buf.put_slice(&buffer[..]) + } + + fn read(mut buf: &[u8]) -> Self { + let id = buf.get_u64(); + let meta = SstableMeta::decode(buf).unwrap(); + Box::new(Sstable::new(id, meta)) } } diff --git a/src/storage/src/hummock/iterator/forward_user.rs b/src/storage/src/hummock/iterator/forward_user.rs index c3f6081df8c86..78da3628a3588 100644 --- a/src/storage/src/hummock/iterator/forward_user.rs +++ b/src/storage/src/hummock/iterator/forward_user.rs @@ -81,6 +81,19 @@ impl> UserIterator { } } + /// Create [`UserIterator`] with maximum epoch. 
+ pub fn for_test(iterator: I, key_range: UserKeyRange) -> Self { + let read_epoch = HummockEpoch::MAX; + Self::new( + iterator, + key_range, + read_epoch, + 0, + None, + ForwardMergeRangeIterator::new(read_epoch), + ) + } + /// Gets the iterator move to the next step. /// /// Returned result: @@ -270,19 +283,6 @@ impl> UserIterator { #[cfg(test)] impl> UserIterator { - /// Create [`UserIterator`] with maximum epoch. - pub(crate) fn for_test(iterator: I, key_range: UserKeyRange) -> Self { - let read_epoch = HummockEpoch::MAX; - Self::new( - iterator, - key_range, - read_epoch, - 0, - None, - ForwardMergeRangeIterator::new(read_epoch), - ) - } - pub(crate) fn for_test_with_epoch( iterator: I, key_range: UserKeyRange, diff --git a/src/storage/src/hummock/iterator/merge_inner.rs b/src/storage/src/hummock/iterator/merge_inner.rs index cf92df72abcdf..8a4e29ae58633 100644 --- a/src/storage/src/hummock/iterator/merge_inner.rs +++ b/src/storage/src/hummock/iterator/merge_inner.rs @@ -137,7 +137,7 @@ impl OrderedMergeIteratorInner { impl OrderedMergeIteratorInner> { /// Used in `merge_imms_in_memory` to merge immutable memtables. - pub fn current_item(&self) -> (Bytes, (HummockEpoch, HummockValue)) { + pub fn current_item(&self) -> (TableKey, (HummockEpoch, HummockValue)) { let item = self .heap .peek() diff --git a/src/storage/src/hummock/iterator/mod.rs b/src/storage/src/hummock/iterator/mod.rs index d1e61a8621b57..2a20039eecba8 100644 --- a/src/storage/src/hummock/iterator/mod.rs +++ b/src/storage/src/hummock/iterator/mod.rs @@ -87,7 +87,6 @@ pub trait HummockIterator: Send + 'static { /// /// # Panics /// This function will panic if the iterator is invalid. - // TODO: Add lifetime fn key(&self) -> FullKey<&[u8]>; /// Retrieves the current value, decoded as [`HummockValue`]. @@ -99,7 +98,6 @@ pub trait HummockIterator: Send + 'static { /// # Panics /// This function will panic if the iterator is invalid, or the value cannot be decoded into /// [`HummockValue`]. 
- // TODO: Add lifetime fn value(&self) -> HummockValue<&[u8]>; /// Indicates whether the iterator can be used. diff --git a/src/storage/src/hummock/iterator/test_utils.rs b/src/storage/src/hummock/iterator/test_utils.rs index 175079da96ce4..4845d7b43a0e4 100644 --- a/src/storage/src/hummock/iterator/test_utils.rs +++ b/src/storage/src/hummock/iterator/test_utils.rs @@ -19,7 +19,7 @@ use bytes::Bytes; use itertools::Itertools; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::TableId; -use risingwave_hummock_sdk::key::{FullKey, UserKey}; +use risingwave_hummock_sdk::key::{FullKey, TableKey, UserKey}; use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId}; use risingwave_object_store::object::{ InMemObjectStore, ObjectStore, ObjectStoreImpl, ObjectStoreRef, @@ -67,6 +67,7 @@ pub fn mock_sstable_store_with_object_store(store: ObjectStoreRef) -> SstableSto 0, FileCache::none(), FileCache::none(), + None, )) } @@ -118,10 +119,10 @@ pub fn iterator_test_value_of(idx: usize) -> Vec { pub fn transform_shared_buffer( batches: Vec<(Vec, HummockValue)>, -) -> Vec<(Bytes, HummockValue)> { +) -> Vec<(TableKey, HummockValue)> { batches .into_iter() - .map(|(k, v)| (k.into(), v)) + .map(|(k, v)| (TableKey(k.into()), v)) .collect_vec() } diff --git a/src/storage/src/hummock/mod.rs b/src/storage/src/hummock/mod.rs index 0e93eeff782bc..60553b5aa09a3 100644 --- a/src/storage/src/hummock/mod.rs +++ b/src/storage/src/hummock/mod.rs @@ -14,30 +14,18 @@ //! Hummock is the state store of the streaming system. 
-use std::ops::{Bound, Deref}; -use std::sync::atomic::AtomicU64; +use std::ops::Bound; use std::sync::Arc; -use std::time::Duration; -use arc_swap::ArcSwap; use bytes::Bytes; use risingwave_hummock_sdk::key::{FullKey, TableKey, UserKeyRangeRef}; use risingwave_hummock_sdk::{HummockEpoch, *}; -#[cfg(any(test, feature = "test"))] -use risingwave_pb::hummock::HummockVersion; -use risingwave_pb::hummock::{version_update_payload, SstableInfo}; -use risingwave_rpc_client::HummockMetaClient; -use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; -use tracing::log::error; +use risingwave_pb::hummock::SstableInfo; mod block_cache; pub use block_cache::*; -use crate::filter_key_extractor::RpcFilterKeyExtractorManager; -use crate::hummock::store::state_store::LocalHummockStorage; -use crate::opts::StorageOpts; - -mod file_cache; +pub mod file_cache; pub use file_cache::*; pub mod sstable; @@ -50,7 +38,6 @@ pub mod hummock_meta_client; pub mod iterator; pub mod shared_buffer; pub mod sstable_store; -mod state_store; #[cfg(any(test, feature = "test"))] pub mod test_utils; pub mod utils; @@ -60,6 +47,7 @@ pub mod event_handler; pub mod local_version; pub mod observer_manager; pub mod store; +pub use store::*; pub mod vacuum; mod validator; pub mod value; @@ -67,306 +55,14 @@ pub mod write_limiter; pub use error::*; pub use risingwave_common::cache::{CacheableEntry, LookupResult, LruCache}; -use risingwave_common_service::observer_manager::{NotificationClient, ObserverManager}; pub use validator::*; use value::*; -use self::event_handler::refiller::CacheRefillConfig; -use self::event_handler::ReadVersionMappingType; use self::iterator::HummockIterator; pub use self::sstable_store::*; -use super::monitor::HummockStateStoreMetrics; -use crate::filter_key_extractor::FilterKeyExtractorManager; -use crate::hummock::backup_reader::{BackupReader, BackupReaderRef}; -use crate::hummock::compactor::CompactorContext; -use 
crate::hummock::event_handler::hummock_event_handler::BufferTracker; -use crate::hummock::event_handler::{HummockEvent, HummockEventHandler}; -use crate::hummock::local_version::pinned_version::{start_pinned_version_worker, PinnedVersion}; -use crate::hummock::observer_manager::HummockObserverNode; -use crate::hummock::store::memtable::ImmutableMemtable; -use crate::hummock::store::version::HummockVersionReader; -use crate::hummock::write_limiter::{WriteLimiter, WriteLimiterRef}; -use crate::monitor::{CompactorMetrics, StoreLocalStatistic}; -use crate::store::{NewLocalOptions, ReadOptions}; - -struct HummockStorageShutdownGuard { - shutdown_sender: UnboundedSender, -} - -impl Drop for HummockStorageShutdownGuard { - fn drop(&mut self) { - let _ = self - .shutdown_sender - .send(HummockEvent::Shutdown) - .inspect_err(|e| error!("unable to send shutdown: {:?}", e)); - } -} - -/// Hummock is the state store backend. -#[derive(Clone)] -pub struct HummockStorage { - hummock_event_sender: UnboundedSender, - - context: CompactorContext, - - sstable_object_id_manager: SstableObjectIdManagerRef, - - buffer_tracker: BufferTracker, - - version_update_notifier_tx: Arc>, - - seal_epoch: Arc, - - pinned_version: Arc>, - - hummock_version_reader: HummockVersionReader, - - _shutdown_guard: Arc, - - read_version_mapping: Arc, - - backup_reader: BackupReaderRef, - - /// current_epoch < min_current_epoch cannot be read. - min_current_epoch: Arc, - - write_limiter: WriteLimiterRef, -} - -impl HummockStorage { - /// Creates a [`HummockStorage`]. 
- #[allow(clippy::too_many_arguments)] - pub async fn new( - options: Arc, - sstable_store: SstableStoreRef, - hummock_meta_client: Arc, - notification_client: impl NotificationClient, - filter_key_extractor_manager: Arc, - state_store_metrics: Arc, - compactor_metrics: Arc, - ) -> HummockResult { - let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( - hummock_meta_client.clone(), - options.sstable_id_remote_fetch_number, - )); - let backup_reader = BackupReader::new( - &options.backup_storage_url, - &options.backup_storage_directory, - ) - .await - .map_err(HummockError::read_backup_error)?; - let write_limiter = Arc::new(WriteLimiter::default()); - let (event_tx, mut event_rx) = unbounded_channel(); - - let observer_manager = ObserverManager::new( - notification_client, - HummockObserverNode::new( - filter_key_extractor_manager.clone(), - backup_reader.clone(), - event_tx.clone(), - write_limiter.clone(), - ), - ) - .await; - observer_manager.start().await; - - let hummock_version = match event_rx.recv().await { - Some(HummockEvent::VersionUpdate(version_update_payload::Payload::PinnedVersion(version))) => version, - _ => unreachable!("the hummock observer manager is the first one to take the event tx. 
Should be full hummock version") - }; - - let (pin_version_tx, pin_version_rx) = unbounded_channel(); - let pinned_version = PinnedVersion::new(hummock_version, pin_version_tx); - tokio::spawn(start_pinned_version_worker( - pin_version_rx, - hummock_meta_client.clone(), - )); - - let compactor_context = CompactorContext::new_local_compact_context( - options.clone(), - sstable_store.clone(), - compactor_metrics.clone(), - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager.clone(), - ), - ); - - let seal_epoch = Arc::new(AtomicU64::new(pinned_version.max_committed_epoch())); - let min_current_epoch = Arc::new(AtomicU64::new(pinned_version.max_committed_epoch())); - let hummock_event_handler = HummockEventHandler::new( - event_tx.clone(), - event_rx, - pinned_version, - compactor_context.clone(), - sstable_object_id_manager.clone(), - state_store_metrics.clone(), - CacheRefillConfig { - timeout: Duration::from_millis(options.cache_refill_timeout_ms), - data_refill_levels: options - .cache_refill_data_refill_levels - .iter() - .copied() - .collect(), - concurrency: options.cache_refill_concurrency, - }, - ); - - let instance = Self { - context: compactor_context, - sstable_object_id_manager, - buffer_tracker: hummock_event_handler.buffer_tracker().clone(), - version_update_notifier_tx: hummock_event_handler.version_update_notifier_tx(), - seal_epoch, - hummock_event_sender: event_tx.clone(), - pinned_version: hummock_event_handler.pinned_version(), - hummock_version_reader: HummockVersionReader::new( - sstable_store, - state_store_metrics.clone(), - ), - _shutdown_guard: Arc::new(HummockStorageShutdownGuard { - shutdown_sender: event_tx, - }), - read_version_mapping: hummock_event_handler.read_version_mapping(), - backup_reader, - min_current_epoch, - write_limiter, - }; - - tokio::spawn(hummock_event_handler.start_hummock_event_handler_worker()); - - Ok(instance) - } - - async fn new_local_inner(&self, option: NewLocalOptions) -> 
LocalHummockStorage { - let (tx, rx) = tokio::sync::oneshot::channel(); - self.hummock_event_sender - .send(HummockEvent::RegisterReadVersion { - table_id: option.table_id, - new_read_version_sender: tx, - is_replicated: option.is_replicated, - }) - .unwrap(); - - let (basic_read_version, instance_guard) = rx.await.unwrap(); - let version_update_notifier_tx = self.version_update_notifier_tx.clone(); - LocalHummockStorage::new( - instance_guard, - basic_read_version, - self.hummock_version_reader.clone(), - self.hummock_event_sender.clone(), - self.buffer_tracker.get_memory_limiter().clone(), - self.write_limiter.clone(), - option, - version_update_notifier_tx, - ) - } - - pub fn sstable_store(&self) -> SstableStoreRef { - self.context.sstable_store.clone() - } - - pub fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef { - &self.sstable_object_id_manager - } - - pub fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManager { - &self.context.filter_key_extractor_manager - } - - pub fn get_memory_limiter(&self) -> Arc { - self.buffer_tracker.get_memory_limiter().clone() - } - - pub fn get_pinned_version(&self) -> PinnedVersion { - self.pinned_version.load().deref().deref().clone() - } - - pub fn backup_reader(&self) -> BackupReaderRef { - self.backup_reader.clone() - } -} - -#[cfg(any(test, feature = "test"))] -impl HummockStorage { - /// Used in the compaction test tool - pub async fn update_version_and_wait(&self, version: HummockVersion) { - use tokio::task::yield_now; - let version_id = version.id; - self.hummock_event_sender - .send(HummockEvent::VersionUpdate( - version_update_payload::Payload::PinnedVersion(version), - )) - .unwrap(); - loop { - if self.pinned_version.load().id() >= version_id { - break; - } - - yield_now().await - } - } - - pub async fn wait_version(&self, version: HummockVersion) { - use tokio::task::yield_now; - loop { - if self.pinned_version.load().id() >= version.id { - break; - } - - yield_now().await - } - } - 
- pub fn get_shared_buffer_size(&self) -> usize { - self.buffer_tracker.get_buffer_size() - } - - pub async fn try_wait_epoch_for_test(&self, wait_epoch: u64) { - let mut rx = self.version_update_notifier_tx.subscribe(); - while *(rx.borrow_and_update()) < wait_epoch { - rx.changed().await.unwrap(); - } - } - - /// Creates a [`HummockStorage`] with default stats. Should only be used by tests. - pub async fn for_test( - options: Arc, - sstable_store: SstableStoreRef, - hummock_meta_client: Arc, - notification_client: impl NotificationClient, - ) -> HummockResult { - Self::new( - options, - sstable_store, - hummock_meta_client, - notification_client, - Arc::new(RpcFilterKeyExtractorManager::default()), - Arc::new(HummockStateStoreMetrics::unused()), - Arc::new(CompactorMetrics::unused()), - ) - .await - } - - pub fn storage_opts(&self) -> &Arc { - &self.context.storage_opts - } - - pub fn version_reader(&self) -> &HummockVersionReader { - &self.hummock_version_reader - } - - #[cfg(any(test, feature = "test"))] - pub async fn wait_version_update(&self, old_id: u64) -> u64 { - use tokio::task::yield_now; - loop { - let cur_id = self.pinned_version.load().id(); - if cur_id > old_id { - return cur_id; - } - yield_now().await; - } - } -} +use crate::mem_table::ImmutableMemtable; +use crate::monitor::StoreLocalStatistic; +use crate::store::ReadOptions; pub async fn get_from_sstable_info( sstable_store_ref: SstableStoreRef, diff --git a/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs b/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs index 7e67b87434e4a..8a3ab574ef3d5 100644 --- a/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs +++ b/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs @@ -32,10 +32,10 @@ use crate::hummock::iterator::{ Backward, DeleteRangeIterator, DirectionEnum, Forward, HummockIterator, HummockIteratorDirection, }; -use crate::hummock::store::memtable::ImmId; use crate::hummock::utils::{range_overlap, 
MemoryTracker}; use crate::hummock::value::HummockValue; use crate::hummock::{HummockEpoch, HummockResult, MonotonicDeleteEvent}; +use crate::mem_table::ImmId; use crate::storage_value::StorageValue; use crate::store::ReadOptions; @@ -51,13 +51,13 @@ fn whether_update_largest_key, Q: AsRef<[u8]>>( } /// The key is `table_key`, which does not contain table id or epoch. -pub(crate) type SharedBufferItem = (Bytes, HummockValue); +pub(crate) type SharedBufferItem = (TableKey, HummockValue); pub type SharedBufferBatchId = u64; /// A shared buffer may contain data from multiple epochs, /// there are multiple versions for a given key (`table_key`), we put those versions into a vector /// and sort them in descending order, aka newest to oldest. -pub type SharedBufferVersionedEntry = (Bytes, Vec<(HummockEpoch, HummockValue)>); +pub type SharedBufferVersionedEntry = (TableKey, Vec<(HummockEpoch, HummockValue)>); type PointRangePair = (PointRange>, PointRange>); struct SharedBufferDeleteRangeMeta { @@ -142,11 +142,11 @@ impl SharedBufferBatchInner { if let Some(item) = payload.last() { if whether_update_largest_key(&largest_table_key, &item.0) { - largest_table_key = Bound::Included(item.0.clone()); + largest_table_key = Bound::Included(item.0.clone().0); } } if let Some(item) = payload.first() { - if smallest_empty || item.0.lt(&smallest_table_key.as_ref()) { + if smallest_empty || item.0.as_ref().lt(smallest_table_key.as_ref()) { smallest_table_key.clear(); smallest_table_key.extend_from_slice(item.0.as_ref()); } @@ -562,7 +562,7 @@ impl SharedBufferBatch { } pub fn build_shared_buffer_item_batches( - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, ) -> Vec { kv_pairs .into_iter() @@ -675,7 +675,7 @@ impl SharedBufferBatchIterator { } } - pub(crate) fn current_item(&self) -> (&Bytes, &(HummockEpoch, HummockValue)) { + pub(crate) fn current_item(&self) -> (&TableKey, &(HummockEpoch, HummockValue)) { assert!(self.is_valid(), "iterator is not 
valid"); let (idx, version_idx) = match D::direction() { DirectionEnum::Forward => (self.current_idx, self.current_version_idx), diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index 0042ad1614ec5..809f797bb11e8 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -164,6 +164,11 @@ impl Debug for Block { } impl Block { + pub fn get_algorithm(buf: &Bytes) -> HummockResult { + let compression = CompressionAlgorithm::decode(&mut &buf[buf.len() - 9..buf.len() - 8])?; + Ok(compression) + } + pub fn decode(buf: Bytes, uncompressed_capacity: usize) -> HummockResult { // Verify checksum. @@ -467,7 +472,14 @@ impl BlockBuilder { debug_assert!(!key.is_empty()); debug_assert_eq!( KeyComparator::compare_encoded_full_key(&self.last_key[..], &key[..]), - Ordering::Less + Ordering::Less, + "epoch: {}, table key: {}", + full_key.epoch, + u64::from_be_bytes( + full_key.user_key.table_key.as_ref()[0..8] + .try_into() + .unwrap() + ), ); } // Update restart point if needed and calculate diff key. @@ -551,8 +563,8 @@ impl BlockBuilder { /// # Format /// /// ```plain - /// compressed: | entries | restart point 0 (4B) | ... | restart point N-1 (4B) | N (4B) | restart point index 0 (5B)| ... | restart point index N-1 (5B) | N (4B) - /// uncompressed: | compression method (1B) | crc32sum (4B) | + /// compressed: | entries | restart point 0 (4B) | ... | restart point N-1 (4B) | N (4B) | restart point index 0 (5B)| ... 
| restart point index N-1 (5B) | N (4B) | table id (4B) + /// uncompressed: | compression method (1B) | xxhash64 checksum (8B) | /// ``` /// /// # Panics @@ -588,44 +600,70 @@ impl BlockBuilder { )); self.buf.put_u32_le(self.table_id.unwrap()); - match self.compression_algorithm { - CompressionAlgorithm::None => (), + if self.compression_algorithm != CompressionAlgorithm::None { + self.buf = Self::compress(&self.buf[..], self.compression_algorithm); + } + + self.compression_algorithm.encode(&mut self.buf); + let checksum = xxhash64_checksum(&self.buf); + self.buf.put_u64_le(checksum); + assert!(self.buf.len() < (u32::MAX) as usize); + + self.buf.as_ref() + } + + pub fn compress_block( + buf: Bytes, + target_compression: CompressionAlgorithm, + ) -> HummockResult { + // Verify checksum. + let checksum = (&buf[buf.len() - 8..]).get_u64_le(); + xxhash64_verify(&buf[..buf.len() - 8], checksum)?; + // Decompress. + let compression = CompressionAlgorithm::decode(&mut &buf[buf.len() - 9..buf.len() - 8])?; + let compressed_data = &buf[..buf.len() - 9]; + assert_eq!(compression, CompressionAlgorithm::None); + let mut writer = Self::compress(compressed_data, target_compression); + + target_compression.encode(&mut writer); + let checksum = xxhash64_checksum(&writer); + writer.put_u64_le(checksum); + Ok(writer.freeze()) + } + + pub fn compress(buf: &[u8], compression_algorithm: CompressionAlgorithm) -> BytesMut { + match compression_algorithm { + CompressionAlgorithm::None => unreachable!(), CompressionAlgorithm::Lz4 => { let mut encoder = lz4::EncoderBuilder::new() .level(4) - .build(BytesMut::with_capacity(self.buf.len()).writer()) + .build(BytesMut::with_capacity(buf.len()).writer()) .map_err(HummockError::encode_error) .unwrap(); encoder - .write_all(&self.buf[..]) + .write_all(buf) .map_err(HummockError::encode_error) .unwrap(); let (writer, result) = encoder.finish(); result.map_err(HummockError::encode_error).unwrap(); - self.buf = writer.into_inner(); + 
writer.into_inner() } CompressionAlgorithm::Zstd => { let mut encoder = - zstd::Encoder::new(BytesMut::with_capacity(self.buf.len()).writer(), 4) + zstd::Encoder::new(BytesMut::with_capacity(buf.len()).writer(), 4) .map_err(HummockError::encode_error) .unwrap(); encoder - .write_all(&self.buf[..]) + .write_all(buf) .map_err(HummockError::encode_error) .unwrap(); let writer = encoder .finish() .map_err(HummockError::encode_error) .unwrap(); - self.buf = writer.into_inner(); + writer.into_inner() } - }; - - self.compression_algorithm.encode(&mut self.buf); - let checksum = xxhash64_checksum(&self.buf); - self.buf.put_u64_le(checksum); - - self.buf.as_ref() + } } /// Approximate block len (uncompressed). diff --git a/src/storage/src/hummock/sstable/builder.rs b/src/storage/src/hummock/sstable/builder.rs index c35417b9d5c04..0cf7c2fd850a7 100644 --- a/src/storage/src/hummock/sstable/builder.rs +++ b/src/storage/src/hummock/sstable/builder.rs @@ -16,26 +16,29 @@ use std::cmp; use std::collections::BTreeSet; use std::sync::Arc; -use bytes::BytesMut; +use bytes::{Bytes, BytesMut}; use risingwave_hummock_sdk::key::{user_key, FullKey, MAX_KEY_LEN}; use risingwave_hummock_sdk::table_stats::{TableStats, TableStatsMap}; use risingwave_hummock_sdk::{HummockEpoch, KeyComparator, LocalSstableInfo}; -use risingwave_pb::hummock::SstableInfo; +use risingwave_pb::hummock::{BloomFilterType, SstableInfo}; use super::utils::CompressionAlgorithm; use super::{ BlockBuilder, BlockBuilderOptions, BlockMeta, MonotonicDeleteEvent, SstableMeta, SstableWriter, - DEFAULT_ENTRY_SIZE, DEFAULT_RESTART_INTERVAL, VERSION, + DEFAULT_BLOCK_SIZE, DEFAULT_ENTRY_SIZE, DEFAULT_RESTART_INTERVAL, VERSION, }; use crate::filter_key_extractor::{FilterKeyExtractorImpl, FullKeyFilterKeyExtractor}; use crate::hummock::sstable::{utils, FilterBuilder}; use crate::hummock::value::HummockValue; -use crate::hummock::{HummockResult, MemoryLimiter, Xor16FilterBuilder, DEFAULT_BLOCK_SIZE}; +use crate::hummock::{ + Block, 
BlockHolder, BlockIterator, HummockResult, MemoryLimiter, Xor16FilterBuilder, +}; use crate::opts::StorageOpts; pub const DEFAULT_SSTABLE_SIZE: usize = 4 * 1024 * 1024; pub const DEFAULT_BLOOM_FALSE_POSITIVE: f64 = 0.001; pub const DEFAULT_MAX_SST_SIZE: u64 = 512 * 1024 * 1024; +pub const MIN_BLOCK_SIZE: usize = 8 * 1024; #[derive(Clone, Debug)] pub struct SstableBuilderOptions { @@ -121,10 +124,6 @@ pub struct SstableBuilder { last_table_id: Option, sstable_id: u64, - /// `stale_key_count` counts range_tombstones as well. - stale_key_count: u64, - /// `total_key_count` counts range_tombstones as well. - total_key_count: u64, /// Per table stats. table_stats: TableStatsMap, /// `last_table_stats` accumulates stats for `last_table_id` and finalizes it in `table_stats` @@ -135,7 +134,6 @@ pub struct SstableBuilder { filter_builder: F, epoch_set: BTreeSet, - memory_limiter: Option>, } @@ -179,8 +177,6 @@ impl SstableBuilder { monotonic_deletes: vec![], sstable_id, filter_key_extractor, - stale_key_count: 0, - total_key_count: 0, table_stats: Default::default(), last_table_stats: Default::default(), range_tombstone_size: 0, @@ -214,8 +210,6 @@ impl SstableBuilder { if event.new_epoch != HummockEpoch::MAX { self.epoch_set.insert(event.new_epoch); } - self.stale_key_count += 1; - self.total_key_count += 1; self.range_tombstone_size += event.encoded_size(); self.monotonic_deletes.push(event); } @@ -235,9 +229,49 @@ impl SstableBuilder { &mut self, full_key: FullKey<&[u8]>, value: HummockValue<&[u8]>, - is_new_user_key: bool, ) -> HummockResult<()> { - self.add(full_key, value, is_new_user_key).await + self.add(full_key, value).await + } + + /// Add raw data of block to sstable. 
return false means fallback + pub async fn add_raw_block( + &mut self, + buf: Bytes, + filter_data: Vec, + smallest_key: FullKey>, + largest_key: Vec, + mut meta: BlockMeta, + ) -> HummockResult { + let table_id = smallest_key.user_key.table_id.table_id; + if self.last_table_id.is_none() || self.last_table_id.unwrap() != table_id { + self.table_ids.insert(table_id); + self.finalize_last_table_stats(); + self.last_table_id = Some(table_id); + } + if !self.block_builder.is_empty() { + let min_block_size = std::cmp::min(MIN_BLOCK_SIZE, self.options.block_capacity / 4); + if self.block_builder.approximate_len() < min_block_size { + let block = Block::decode(buf, meta.uncompressed_size as usize)?; + let mut iter = BlockIterator::new(BlockHolder::from_owned_block(Box::new(block))); + iter.seek_to_first(); + while iter.is_valid() { + let value = HummockValue::from_slice(iter.value()) + .expect("decode failed for fast compact"); + self.add_impl(iter.key(), value, false).await?; + iter.next(); + } + return Ok(false); + } + self.build_block().await?; + } + self.last_full_key = largest_key; + assert_eq!(meta.len as usize, buf.len()); + meta.offset = self.writer.data_len() as u32; + self.block_metas.push(meta); + self.filter_builder.add_raw_data(filter_data); + let block_meta = self.block_metas.last_mut().unwrap(); + self.writer.write_block_bytes(buf, block_meta).await?; + Ok(true) } /// Add kv pair to sstable. @@ -245,11 +279,18 @@ impl SstableBuilder { &mut self, full_key: FullKey<&[u8]>, value: HummockValue<&[u8]>, - is_new_user_key: bool, ) -> HummockResult<()> { - const LARGE_KEY_LEN: usize = MAX_KEY_LEN >> 1; + self.add_impl(full_key, value, true).await + } - let mut is_new_table = false; + /// Add kv pair to sstable. 
+ async fn add_impl( + &mut self, + full_key: FullKey<&[u8]>, + value: HummockValue<&[u8]>, + could_switch_block: bool, + ) -> HummockResult<()> { + const LARGE_KEY_LEN: usize = MAX_KEY_LEN >> 1; let table_key_len = full_key.user_key.table_key.as_ref().len(); if table_key_len >= LARGE_KEY_LEN { @@ -265,34 +306,27 @@ impl SstableBuilder { // TODO: refine me full_key.encode_into(&mut self.raw_key); value.encode(&mut self.raw_value); - if is_new_user_key { - if value.is_delete() { - self.stale_key_count += 1; - } - let table_id = full_key.user_key.table_id.table_id(); - is_new_table = self.last_table_id.is_none() || self.last_table_id.unwrap() != table_id; - if is_new_table { - self.table_ids.insert(table_id); - self.finalize_last_table_stats(); - self.last_table_id = Some(table_id); - if !self.block_builder.is_empty() { - self.build_block().await?; - } - } else if self.block_builder.approximate_len() >= self.options.block_capacity { + let is_new_user_key = self.last_full_key.is_empty() + || !user_key(&self.raw_key).eq(user_key(&self.last_full_key)); + let table_id = full_key.user_key.table_id.table_id(); + let is_new_table = self.last_table_id.is_none() || self.last_table_id.unwrap() != table_id; + if is_new_table { + assert!(could_switch_block); + self.table_ids.insert(table_id); + self.finalize_last_table_stats(); + self.last_table_id = Some(table_id); + if !self.block_builder.is_empty() { self.build_block().await?; } - } else { - self.stale_key_count += 1; + } else if is_new_user_key + && self.block_builder.approximate_len() >= self.options.block_capacity + && could_switch_block + { + self.build_block().await?; } - self.total_key_count += 1; self.last_table_stats.total_key_count += 1; - self.epoch_set.insert(full_key.epoch); - if is_new_table && !self.block_builder.is_empty() { - self.build_block().await?; - } - // Rotate block builder if the previous one has been built. 
if self.block_builder.is_empty() { self.block_metas.push(BlockMeta { @@ -300,7 +334,9 @@ impl SstableBuilder { len: 0, smallest_key: full_key.encode(), uncompressed_size: 0, - }) + total_key_count: 0, + stale_key_count: 0, + }); } let table_id = full_key.user_key.table_id.table_id(); @@ -311,6 +347,10 @@ impl SstableBuilder { self.filter_builder.add_key(extract_key, table_id); } self.block_builder.add(full_key, self.raw_value.as_ref()); + self.block_metas.last_mut().unwrap().total_key_count += 1; + if !is_new_user_key || value.is_delete() { + self.block_metas.last_mut().unwrap().stale_key_count += 1; + } self.last_table_stats.total_key_size += full_key.encoded_len() as i64; self.last_table_stats.total_value_size += value.encoded_len() as i64; @@ -324,9 +364,6 @@ impl SstableBuilder { /// Finish building sst. /// - /// Unlike most LSM-Tree implementations, sstable meta and data are encoded separately. - /// Both meta and data has its own object (file). - /// /// # Format /// /// data: @@ -396,12 +433,28 @@ impl SstableBuilder { .encode(); } } + let bloom_filter_kind = if self.filter_builder.support_blocked_raw_data() { + BloomFilterType::Blocked + } else { + BloomFilterType::Sstable + }; let bloom_filter = if self.options.bloom_false_positive > 0.0 { self.filter_builder.finish(self.memory_limiter.clone()) } else { vec![] }; + let total_key_count = self + .block_metas + .iter() + .map(|block_meta| block_meta.total_key_count as u64) + .sum::() + + self.monotonic_deletes.len() as u64; + let stale_key_count = self + .block_metas + .iter() + .map(|block_meta| block_meta.stale_key_count as u64) + .sum::(); let uncompressed_file_size = self .block_metas .iter() @@ -412,7 +465,7 @@ impl SstableBuilder { block_metas: self.block_metas, bloom_filter, estimated_size: 0, - key_count: utils::checked_into_u32(self.total_key_count), + key_count: utils::checked_into_u32(total_key_count), smallest_key, largest_key, version: VERSION, @@ -484,6 +537,7 @@ impl SstableBuilder { let 
sst_info = SstableInfo { object_id: self.sstable_id, sst_id: self.sstable_id, + bloom_filter_kind: bloom_filter_kind as i32, key_range: Some(risingwave_pb::hummock::KeyRange { left: meta.smallest_key.clone(), right: meta.largest_key.clone(), @@ -492,8 +546,8 @@ impl SstableBuilder { file_size: meta.estimated_size as u64, table_ids: self.table_ids.into_iter().collect(), meta_offset: meta.meta_offset, - stale_key_count: self.stale_key_count, - total_key_count: self.total_key_count, + stale_key_count, + total_key_count, uncompressed_file_size: uncompressed_file_size + meta.encoded_size() as u64, min_epoch: cmp::min(min_epoch, tombstone_min_epoch), max_epoch: cmp::max(max_epoch, tombstone_max_epoch), @@ -503,8 +557,8 @@ impl SstableBuilder { "meta_size {} bloom_filter_size {} add_key_counts {} stale_key_count {} min_epoch {} max_epoch {} epoch_count {}", meta.encoded_size(), meta.bloom_filter.len(), - self.total_key_count, - self.stale_key_count, + total_key_count, + stale_key_count, min_epoch, max_epoch, self.epoch_set.len() @@ -532,7 +586,6 @@ impl SstableBuilder { async fn build_block(&mut self) -> HummockResult<()> { // Skip empty block. 
if self.block_builder.is_empty() { - self.block_metas.pop(); return Ok(()); } @@ -554,12 +607,8 @@ impl SstableBuilder { Ok(()) } - pub fn len(&self) -> usize { - self.total_key_count as usize - } - pub fn is_empty(&self) -> bool { - self.total_key_count > 0 + self.range_tombstone_size > 0 || self.writer.data_len() > 0 } /// Returns true if we roughly reached capacity @@ -587,17 +636,22 @@ pub(super) mod tests { use std::collections::Bound; use risingwave_common::catalog::TableId; + use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::UserKey; use super::*; use crate::assert_bytes_eq; + use crate::filter_key_extractor::{DummyFilterKeyExtractor, MultiFilterKeyExtractor}; use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::sstable::xor_filter::BlockedXor16FilterBuilder; use crate::hummock::test_utils::{ default_builder_opt_for_test, gen_test_sstable_impl, mock_sst_writer, test_key_of, test_value_of, TEST_KEYS_COUNT, }; - use crate::hummock::{CachePolicy, Sstable, Xor16FilterBuilder, Xor8FilterBuilder}; + use crate::hummock::{ + CachePolicy, Sstable, SstableWriterOptions, Xor16FilterBuilder, Xor8FilterBuilder, + }; + use crate::monitor::StoreLocalStatistic; #[tokio::test] async fn test_empty() { @@ -650,7 +704,6 @@ pub(super) mod tests { b.add_for_test( test_key_of(i).to_ref(), HummockValue::put(&test_value_of(i)), - true, ) .await .unwrap(); @@ -687,24 +740,28 @@ pub(super) mod tests { // build remote table let sstable_store = mock_sstable_store(); - let (table, _) = gen_test_sstable_impl::, F>( + let sst_info = gen_test_sstable_impl::, F>( opts, 0, (0..TEST_KEYS_COUNT).map(|i| (test_key_of(i), HummockValue::put(test_value_of(i)))), vec![], - sstable_store, + sstable_store.clone(), CachePolicy::NotFill, ) .await; + let table = sstable_store + .sstable(&sst_info, &mut StoreLocalStatistic::default()) + .await + .unwrap(); - assert_eq!(table.has_bloom_filter(), with_blooms); + 
assert_eq!(table.value().has_bloom_filter(), with_blooms); for i in 0..key_count { let full_key = test_key_of(i); - if table.has_bloom_filter() { + if table.value().has_bloom_filter() { let hash = Sstable::hash_for_bloom_filter(full_key.user_key.encode().as_slice(), 0); let key_ref = full_key.user_key.as_ref(); assert!( - table.may_match_hash( + table.value().may_match_hash( &(Bound::Included(key_ref), Bound::Included(key_ref)), hash ), @@ -722,4 +779,70 @@ pub(super) mod tests { test_with_bloom_filter::(true).await; test_with_bloom_filter::(true).await; } + + #[tokio::test] + async fn test_no_bloom_filter_block() { + let opts = SstableBuilderOptions::default(); + // build remote table + let sstable_store = mock_sstable_store(); + let writer_opts = SstableWriterOptions::default(); + let object_id = 1; + let writer = sstable_store + .clone() + .create_sst_writer(object_id, writer_opts); + let mut filter = MultiFilterKeyExtractor::default(); + filter.register( + 1, + Arc::new(FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)), + ); + filter.register( + 2, + Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + ); + filter.register( + 3, + Arc::new(FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)), + ); + let mut builder = SstableBuilder::new( + object_id, + writer, + BlockedXor16FilterBuilder::new(1024), + opts, + Arc::new(FilterKeyExtractorImpl::Multi(filter)), + None, + ); + + let key_count: usize = 10000; + for table_id in 1..4 { + let mut table_key = VirtualNode::ZERO.to_be_bytes().to_vec(); + for idx in 0..key_count { + table_key.resize(VirtualNode::SIZE, 0); + table_key.extend_from_slice(format!("key_test_{:05}", idx * 2).as_bytes()); + let k = UserKey::for_test(TableId::new(table_id), table_key.as_ref()); + let v = test_value_of(idx); + builder + .add(FullKey::from_user_key(k, 1), HummockValue::put(v.as_ref())) + .await + .unwrap(); + } + } + let ret = builder.finish().await.unwrap(); + let sst_info = ret.sst_info.sst_info.clone(); 
+ ret.writer_output.await.unwrap().unwrap(); + let table = sstable_store + .sstable(&sst_info, &mut StoreLocalStatistic::default()) + .await + .unwrap(); + let mut table_key = VirtualNode::ZERO.to_be_bytes().to_vec(); + for idx in 0..key_count { + table_key.resize(VirtualNode::SIZE, 0); + table_key.extend_from_slice(format!("key_test_{:05}", idx * 2).as_bytes()); + let k = UserKey::for_test(TableId::new(2), table_key.as_slice()); + let hash = Sstable::hash_for_bloom_filter(&k.encode(), 2); + let key_ref = k.as_ref(); + assert!(table + .value() + .may_match_hash(&(Bound::Included(key_ref), Bound::Included(key_ref)), hash)); + } + } } diff --git a/src/storage/src/hummock/sstable/filter.rs b/src/storage/src/hummock/sstable/filter.rs index 8930e2ed68ace..ac1cef4681d26 100644 --- a/src/storage/src/hummock/sstable/filter.rs +++ b/src/storage/src/hummock/sstable/filter.rs @@ -30,4 +30,12 @@ pub trait FilterBuilder: Send { fn switch_block(&mut self, _memory_limiter: Option>) {} /// approximate memory when finish filter fn approximate_building_memory(&self) -> usize; + + /// Add raw data which build by keys directly. 
Please make sure that you have finished the last + /// block by calling `switch_block` + fn add_raw_data(&mut self, _raw: Vec) {} + + fn support_blocked_raw_data(&self) -> bool { + false + } } diff --git a/src/storage/src/hummock/sstable/forward_sstable_iterator.rs b/src/storage/src/hummock/sstable/forward_sstable_iterator.rs index edb6c372ba31c..3988d082177f8 100644 --- a/src/storage/src/hummock/sstable/forward_sstable_iterator.rs +++ b/src/storage/src/hummock/sstable/forward_sstable_iterator.rs @@ -235,7 +235,7 @@ impl SstableIterator { idx: usize, seek_key: Option>, ) -> HummockResult<()> { - tracing::trace!( + tracing::debug!( target: "events::storage::sstable::block_seek", "table iterator seek: sstable_object_id = {}, block_id = {}", self.sst.value().id, diff --git a/src/storage/src/hummock/sstable/mod.rs b/src/storage/src/hummock/sstable/mod.rs index e7e11ace8a2fb..dfa4a5c0a0095 100644 --- a/src/storage/src/hummock/sstable/mod.rs +++ b/src/storage/src/hummock/sstable/mod.rs @@ -19,7 +19,7 @@ mod block; use std::collections::BTreeSet; use std::fmt::{Debug, Formatter}; -use std::ops::{BitXor, Bound}; +use std::ops::{BitXor, Bound, Range}; pub use block::*; mod block_iterator; @@ -27,13 +27,13 @@ pub use block_iterator::*; mod bloom; mod xor_filter; pub use bloom::BloomFilterBuilder; -use xor_filter::XorFilterReader; -pub use xor_filter::{BlockedXor16FilterBuilder, Xor16FilterBuilder, Xor8FilterBuilder}; +pub use xor_filter::{ + BlockedXor16FilterBuilder, Xor16FilterBuilder, Xor8FilterBuilder, XorFilterReader, +}; pub mod builder; pub use builder::*; pub mod writer; use risingwave_common::catalog::TableId; -use risingwave_object_store::object::BlockLocation; pub use writer::*; mod forward_sstable_iterator; pub mod multi_builder; @@ -71,7 +71,8 @@ use crate::store::ReadOptions; const DEFAULT_META_BUFFER_CAPACITY: usize = 4096; const MAGIC: u32 = 0x5785ab73; -const VERSION: u32 = 1; +const OLD_VERSION: u32 = 1; +const VERSION: u32 = 2; #[derive(Clone, PartialEq, 
Eq, Debug)] // delete keys located in [start_user_key, end_user_key) @@ -247,7 +248,6 @@ impl Sstable { pub fn new(id: HummockSstableObjectId, mut meta: SstableMeta) -> Self { let filter_data = std::mem::take(&mut meta.bloom_filter); let filter_reader = XorFilterReader::new(&filter_data, &meta.block_metas); - Self { id, meta, @@ -260,14 +260,12 @@ impl Sstable { !self.filter_reader.is_empty() } - pub fn calculate_block_info(&self, block_index: usize) -> (BlockLocation, usize) { + pub fn calculate_block_info(&self, block_index: usize) -> (Range, usize) { let block_meta = &self.meta.block_metas[block_index]; - let block_loc = BlockLocation { - offset: block_meta.offset as usize, - size: block_meta.len as usize, - }; + let range = + block_meta.offset as usize..block_meta.offset as usize + block_meta.len as usize; let uncompressed_capacity = block_meta.uncompressed_size as usize; - (block_loc, uncompressed_capacity) + (range, uncompressed_capacity) } #[inline(always)] @@ -317,24 +315,28 @@ impl Sstable { } } -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Default, Debug, Eq, PartialEq)] pub struct BlockMeta { pub smallest_key: Vec, pub offset: u32, pub len: u32, pub uncompressed_size: u32, + pub total_key_count: u32, + pub stale_key_count: u32, } impl BlockMeta { /// Format: /// /// ```plain - /// | offset (4B) | len (4B) | smallest key len (4B) | smallest key | + /// | offset (4B) | len (4B) | uncompressed size (4B) | smallest key len (4B) | smallest key | /// ``` pub fn encode(&self, buf: &mut Vec) { buf.put_u32_le(self.offset); buf.put_u32_le(self.len); buf.put_u32_le(self.uncompressed_size); + buf.put_u32_le(self.total_key_count); + buf.put_u32_le(self.stale_key_count); put_length_prefixed_slice(buf, &self.smallest_key); } @@ -342,18 +344,40 @@ impl BlockMeta { let offset = buf.get_u32_le(); let len = buf.get_u32_le(); let uncompressed_size = buf.get_u32_le(); + + let total_key_count = buf.get_u32_le(); + let stale_key_count = buf.get_u32_le(); + let 
smallest_key = get_length_prefixed_slice(buf); + Self { + smallest_key, + offset, + len, + uncompressed_size, + total_key_count, + stale_key_count, + } + } + + pub fn decode_from_v1(buf: &mut &[u8]) -> Self { + let offset = buf.get_u32_le(); + let len = buf.get_u32_le(); + let uncompressed_size = buf.get_u32_le(); + let total_key_count = 0; + let stale_key_count = 0; let smallest_key = get_length_prefixed_slice(buf); Self { smallest_key, offset, len, uncompressed_size, + total_key_count, + stale_key_count, } } #[inline] pub fn encoded_size(&self) -> usize { - 16 /* offset + len + key len + uncompressed size */ + self.smallest_key.len() + 24 /* offset + len + key len + uncompressed size + total key count + stale key count */ + self.smallest_key.len() } pub fn table_id(&self) -> TableId { @@ -361,7 +385,7 @@ impl BlockMeta { } } -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Default, Clone, PartialEq, Eq, Debug)] pub struct SstableMeta { pub block_metas: Vec, pub bloom_filter: Vec, @@ -444,7 +468,7 @@ impl SstableMeta { cursor -= 4; let version = (&buf[cursor..cursor + 4]).get_u32_le(); - if version != VERSION { + if version != VERSION && version != OLD_VERSION { return Err(HummockError::invalid_format_version(version)); } @@ -455,9 +479,16 @@ impl SstableMeta { let block_meta_count = buf.get_u32_le() as usize; let mut block_metas = Vec::with_capacity(block_meta_count); - for _ in 0..block_meta_count { - block_metas.push(BlockMeta::decode(buf)); + if version == OLD_VERSION { + for _ in 0..block_meta_count { + block_metas.push(BlockMeta::decode_from_v1(buf)); + } + } else { + for _ in 0..block_meta_count { + block_metas.push(BlockMeta::decode(buf)); + } } + let bloom_filter = get_length_prefixed_slice(buf); let estimated_size = buf.get_u32_le(); let key_count = buf.get_u32_le(); @@ -538,15 +569,14 @@ mod tests { block_metas: vec![ BlockMeta { smallest_key: b"0-smallest-key".to_vec(), - offset: 0, len: 100, - uncompressed_size: 0, + ..Default::default() }, BlockMeta 
{ smallest_key: b"5-some-key".to_vec(), offset: 100, len: 100, - uncompressed_size: 0, + ..Default::default() }, ], bloom_filter: b"0123456789".to_vec(), diff --git a/src/storage/src/hummock/sstable/multi_builder.rs b/src/storage/src/hummock/sstable/multi_builder.rs index 31b38d66f8fc0..9bee67e78ca68 100644 --- a/src/storage/src/hummock/sstable/multi_builder.rs +++ b/src/storage/src/hummock/sstable/multi_builder.rs @@ -16,6 +16,7 @@ use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering::SeqCst; use std::sync::Arc; +use bytes::Bytes; use num_integer::Integer; use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::{FullKey, PointRange, UserKey}; @@ -28,7 +29,7 @@ use crate::hummock::sstable::filter::FilterBuilder; use crate::hummock::sstable_store::SstableStoreRef; use crate::hummock::value::HummockValue; use crate::hummock::{ - BatchUploadWriter, CachePolicy, HummockResult, MemoryLimiter, SstableBuilder, + BatchUploadWriter, BlockMeta, CachePolicy, HummockResult, MemoryLimiter, SstableBuilder, SstableBuilderOptions, SstableWriter, SstableWriterOptions, Xor16FilterBuilder, }; use crate::monitor::CompactorMetrics; @@ -150,6 +151,30 @@ where self.add_full_key(full_key, value, is_new_user_key).await } + pub async fn add_raw_block( + &mut self, + buf: Bytes, + filter_data: Vec, + smallest_key: FullKey>, + largest_key: Vec, + block_meta: BlockMeta, + ) -> HummockResult { + if self.current_builder.is_none() { + if let Some(progress) = &self.task_progress { + progress + .num_pending_write_io + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + } + let builder = self.builder_factory.open_builder().await?; + self.current_builder = Some(builder); + } + + let builder = self.current_builder.as_mut().unwrap(); + builder + .add_raw_block(buf, filter_data, smallest_key, largest_key, block_meta) + .await + } + /// Adds a key-value pair to the underlying builders. 
/// /// If `allow_split` and the current builder reaches its capacity, this function will create a @@ -224,7 +249,7 @@ where } let builder = self.current_builder.as_mut().unwrap(); - builder.add(full_key, value, is_new_user_key).await + builder.add(full_key, value).await } pub fn check_table_and_vnode_change(&mut self, user_key: &UserKey<&[u8]>) -> (bool, bool) { diff --git a/src/storage/src/hummock/sstable/sstable_object_id_manager.rs b/src/storage/src/hummock/sstable/sstable_object_id_manager.rs index 6ae7ddad4a7ea..69ca3712eb379 100644 --- a/src/storage/src/hummock/sstable/sstable_object_id_manager.rs +++ b/src/storage/src/hummock/sstable/sstable_object_id_manager.rs @@ -22,8 +22,9 @@ use std::sync::Arc; use itertools::Itertools; use parking_lot::Mutex; use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId, SstObjectIdRange}; +use risingwave_pb::hummock::GetNewSstIdsRequest; use risingwave_pb::meta::heartbeat_request::extra_info::Info; -use risingwave_rpc_client::{ExtraInfoSource, HummockMetaClient}; +use risingwave_rpc_client::{ExtraInfoSource, GrpcCompactorProxyClient, HummockMetaClient}; use sync_point::sync_point; use tokio::sync::oneshot; @@ -198,25 +199,95 @@ impl GetObjectId for Arc { } } +struct SharedComapctorObjectIdManagerCore { + output_object_ids: VecDeque, + client: Option, + sstable_id_remote_fetch_number: u32, +} +impl SharedComapctorObjectIdManagerCore { + pub fn new( + output_object_ids: VecDeque, + client: GrpcCompactorProxyClient, + sstable_id_remote_fetch_number: u32, + ) -> Self { + Self { + output_object_ids, + client: Some(client), + sstable_id_remote_fetch_number, + } + } + + pub fn for_test(output_object_ids: VecDeque) -> Self { + Self { + output_object_ids, + client: None, + sstable_id_remote_fetch_number: 0, + } + } +} /// `SharedComapctorObjectIdManager` is used to get output sst id for serverless compaction. 
#[derive(Clone)] pub struct SharedComapctorObjectIdManager { - output_object_ids: VecDeque, + core: Arc>, } impl SharedComapctorObjectIdManager { - pub fn new(output_object_ids: VecDeque) -> Self { - Self { output_object_ids } + pub fn new( + output_object_ids: VecDeque, + client: GrpcCompactorProxyClient, + sstable_id_remote_fetch_number: u32, + ) -> Self { + Self { + core: Arc::new(tokio::sync::Mutex::new( + SharedComapctorObjectIdManagerCore::new( + output_object_ids, + client, + sstable_id_remote_fetch_number, + ), + )), + } + } + + pub fn for_test(output_object_ids: VecDeque) -> Self { + Self { + core: Arc::new(tokio::sync::Mutex::new( + SharedComapctorObjectIdManagerCore::for_test(output_object_ids), + )), + } } } #[async_trait::async_trait] impl GetObjectId for SharedComapctorObjectIdManager { async fn get_new_sst_object_id(&mut self) -> HummockResult { - if let Some(first_element) = self.output_object_ids.pop_front() { + let mut guard = self.core.lock().await; + let core = guard.deref_mut(); + + if let Some(first_element) = core.output_object_ids.pop_front() { Ok(first_element) } else { - return Err(HummockError::other("Output object id runs out")); + tracing::warn!("The pre-allocated object ids are used up, and new object id are obtained through RPC."); + let request = GetNewSstIdsRequest { + number: core.sstable_id_remote_fetch_number, + }; + match core + .client + .as_mut() + .expect("GrpcCompactorProxyClient is None") + .get_new_sst_ids(request) + .await + { + Ok(response) => { + let resp = response.into_inner(); + let start_id = resp.start_id; + core.output_object_ids.extend((start_id + 1)..resp.end_id); + Ok(start_id) + } + Err(e) => Err(HummockError::other(format!( + "Fail to get new sst id, {}", + e + ))), + } } } } @@ -313,14 +384,10 @@ impl SstObjectIdTrackerInner { #[cfg(test)] mod test { - use std::collections::VecDeque; - use risingwave_common::try_match_expand; use crate::hummock::sstable::sstable_object_id_manager::AutoTrackerId; - use 
crate::hummock::{ - GetObjectId, SharedComapctorObjectIdManager, SstObjectIdTracker, TrackerId, - }; + use crate::hummock::{SstObjectIdTracker, TrackerId}; #[tokio::test] async fn test_object_id_tracker_basic() { @@ -390,18 +457,4 @@ mod test { object_id_tacker.remove_tracker(auto_id_3); assert!(object_id_tacker.tracking_object_ids().is_empty()); } - - #[tokio::test] - async fn test_shared_comapctor_object_id_manager() { - let mut pre_allocated_object_ids: VecDeque<_> = VecDeque::new(); - pre_allocated_object_ids.extend(vec![1, 3, 5]); - let mut object_id_manager = SharedComapctorObjectIdManager::new(pre_allocated_object_ids); - assert_eq!(object_id_manager.get_new_sst_object_id().await.unwrap(), 1); - - assert_eq!(object_id_manager.get_new_sst_object_id().await.unwrap(), 3); - - assert_eq!(object_id_manager.get_new_sst_object_id().await.unwrap(), 5); - - assert!(object_id_manager.get_new_sst_object_id().await.is_err()); - } } diff --git a/src/storage/src/hummock/sstable/utils.rs b/src/storage/src/hummock/sstable/utils.rs index 3e9aa73815254..61373289288ab 100644 --- a/src/storage/src/hummock/sstable/utils.rs +++ b/src/storage/src/hummock/sstable/utils.rs @@ -22,11 +22,11 @@ use xxhash_rust::xxh64; use super::{HummockError, HummockResult}; -unsafe fn u64(ptr: *const u8) -> u64 { +unsafe fn read_u64(ptr: *const u8) -> u64 { ptr::read_unaligned(ptr as *const u64) } -unsafe fn u32(ptr: *const u8) -> u32 { +unsafe fn read_u32(ptr: *const u8) -> u32 { ptr::read_unaligned(ptr as *const u32) } @@ -36,12 +36,12 @@ pub fn bytes_diff_below_max_key_length<'a>(base: &[u8], target: &'a [u8]) -> &'a let mut i = 0; unsafe { while i + 8 <= end { - if u64(base.as_ptr().add(i)) != u64(target.as_ptr().add(i)) { + if read_u64(base.as_ptr().add(i)) != read_u64(target.as_ptr().add(i)) { break; } i += 8; } - if i + 4 <= end && u32(base.as_ptr().add(i)) == u32(target.as_ptr().add(i)) { + if i + 4 <= end && read_u32(base.as_ptr().add(i)) == read_u32(target.as_ptr().add(i)) { i += 4; } 
while i < end { @@ -113,6 +113,16 @@ impl CompressionAlgorithm { } } +impl From for CompressionAlgorithm { + fn from(ca: u32) -> Self { + match ca { + 0 => CompressionAlgorithm::None, + 1 => CompressionAlgorithm::Lz4, + _ => CompressionAlgorithm::Zstd, + } + } +} + impl From for u8 { fn from(ca: CompressionAlgorithm) -> Self { match ca { diff --git a/src/storage/src/hummock/sstable/writer.rs b/src/storage/src/hummock/sstable/writer.rs index 33d7f1b0736bb..d5b5122a0a8d6 100644 --- a/src/storage/src/hummock/sstable/writer.rs +++ b/src/storage/src/hummock/sstable/writer.rs @@ -25,6 +25,8 @@ pub trait SstableWriter: Send { /// Write an SST block to the writer. async fn write_block(&mut self, block: &[u8], meta: &BlockMeta) -> HummockResult<()>; + async fn write_block_bytes(&mut self, block: Bytes, meta: &BlockMeta) -> HummockResult<()>; + /// Finish writing the SST. async fn finish(self, meta: SstableMeta) -> HummockResult; @@ -60,6 +62,11 @@ impl SstableWriter for InMemWriter { Ok(()) } + async fn write_block_bytes(&mut self, block: Bytes, _meta: &BlockMeta) -> HummockResult<()> { + self.buf.extend_from_slice(&block); + Ok(()) + } + async fn finish(mut self, meta: SstableMeta) -> HummockResult { meta.encode_to(&mut self.buf); Ok((Bytes::from(self.buf), meta)) @@ -94,7 +101,7 @@ mod tests { smallest_key: Vec::new(), len: 1000, offset: i * 1000, - uncompressed_size: 0, // dummy value + ..Default::default() }); blocks.push(data.slice((i * 1000) as usize..((i + 1) * 1000) as usize)); } diff --git a/src/storage/src/hummock/sstable/xor_filter.rs b/src/storage/src/hummock/sstable/xor_filter.rs index f415cfc0980a9..8d0564ebbf92d 100644 --- a/src/storage/src/hummock/sstable/xor_filter.rs +++ b/src/storage/src/hummock/sstable/xor_filter.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use bytes::{Buf, BufMut}; use itertools::Itertools; +use risingwave_common::must_match; use risingwave_hummock_sdk::key::{FullKey, UserKeyRangeRef}; use xorf::{Filter, Xor16, Xor8}; @@ -57,6 +58,21 @@ 
impl Xor16FilterBuilder { }; Self { key_hash_entries } } + + fn build_from_xor16(xor_filter: &Xor16) -> Vec { + let mut buf = Vec::with_capacity(8 + 4 + xor_filter.fingerprints.len() * 2 + 1); + buf.put_u64_le(xor_filter.seed); + buf.put_u32_le(xor_filter.block_length as u32); + xor_filter + .fingerprints + .iter() + .for_each(|x| buf.put_u16_le(*x)); + // We add an extra byte so we can distinguish bloom filter and xor filter by the last + // byte(255 indicates a xor16 filter, 254 indicates a xor8 filter and others indicate a + // bloom filter). + buf.put_u8(FOOTER_XOR16); + buf + } } impl FilterBuilder for Xor16FilterBuilder { @@ -79,18 +95,7 @@ impl FilterBuilder for Xor16FilterBuilder { let xor_filter = Xor16::from(&self.key_hash_entries); self.key_hash_entries.clear(); - let mut buf = Vec::with_capacity(8 + 4 + xor_filter.fingerprints.len() * 2 + 1); - buf.put_u64_le(xor_filter.seed); - buf.put_u32_le(xor_filter.block_length as u32); - xor_filter - .fingerprints - .iter() - .for_each(|x| buf.put_u16_le(*x)); - // We add an extra byte so we can distinguish bloom filter and xor filter by the last - // byte(255 indicates a xor16 filter, 254 indicates a xor8 filter and others indicate a - // bloom filter). 
- buf.put_u8(FOOTER_XOR16); - buf + Self::build_from_xor16(&xor_filter) } fn create(_fpr: f64, capacity: usize) -> Self { @@ -194,6 +199,17 @@ impl FilterBuilder for BlockedXor16FilterBuilder { fn approximate_building_memory(&self) -> usize { self.current.approximate_building_memory() } + + fn add_raw_data(&mut self, raw: Vec) { + assert!(self.current.key_hash_entries.is_empty()); + self.data.put_u32_le(raw.len() as u32); + self.data.extend(raw); + self.block_count += 1; + } + + fn support_blocked_raw_data(&self) -> bool { + true + } } pub struct BlockBasedXor16Filter { @@ -389,6 +405,15 @@ impl XorFilterReader { } } } + + pub fn get_block_raw_filter(&self, block_index: usize) -> Vec { + let reader = must_match!(&self.filter, XorFilter::BlockXor16(reader) => reader); + Xor16FilterBuilder::build_from_xor16(&reader.filters[block_index].1) + } + + pub fn is_block_based_filter(&self) -> bool { + matches!(self.filter, XorFilter::BlockXor16(_)) + } } impl Clone for XorFilterReader { @@ -466,7 +491,7 @@ mod tests { epoch, }; let v = HummockValue::put(test_value_of(i)); - builder.add(k.to_ref(), v.as_slice(), j == 0).await.unwrap(); + builder.add(k.to_ref(), v.as_slice()).await.unwrap(); } } let ret = builder.finish().await.unwrap(); diff --git a/src/storage/src/hummock/sstable_store.rs b/src/storage/src/hummock/sstable_store.rs index 987d9080ae563..73d6110cacd29 100644 --- a/src/storage/src/hummock/sstable_store.rs +++ b/src/storage/src/hummock/sstable_store.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
use std::clone::Clone; +use std::future::Future; use std::sync::atomic::Ordering; use std::sync::Arc; -use std::time::Duration; use await_tree::InstrumentAwait; use bytes::Bytes; @@ -26,7 +26,7 @@ use risingwave_common::config::StorageMemoryConfig; use risingwave_hummock_sdk::{HummockSstableObjectId, OBJECT_SUFFIX}; use risingwave_hummock_trace::TracedCachePolicy; use risingwave_object_store::object::{ - BlockLocation, MonitoredStreamingReader, ObjectError, ObjectMetadataIter, ObjectStoreRef, + MonitoredStreamingReader, ObjectError, ObjectMetadataIter, ObjectStoreRef, ObjectStreamingUploader, }; use risingwave_pb::hummock::SstableInfo; @@ -39,6 +39,7 @@ use super::{ Block, BlockCache, BlockMeta, BlockResponse, FileCache, RecentFilter, Sstable, SstableBlockIndex, SstableMeta, SstableWriter, }; +use crate::hummock::file_cache::preclude::*; use crate::hummock::multi_builder::UploadJoinHandle; use crate::hummock::{ BlockHolder, CacheableEntry, HummockError, HummockResult, LruCache, MemoryLimiter, @@ -105,7 +106,7 @@ impl LruCacheEventListener for BlockCacheEventListener { sst_id: key.0, block_idx: key.1, }; - self.data_file_cache.insert_without_wait(key, value); + self.data_file_cache.insert_async(key, value); } } @@ -116,7 +117,7 @@ impl LruCacheEventListener for MetaCacheEventListener { type T = Box; fn on_release(&self, key: Self::K, value: Self::T) { - self.0.insert_without_wait(key, value); + self.0.insert_async(key, value); } } @@ -129,7 +130,7 @@ pub struct SstableStore { data_file_cache: FileCache>, meta_file_cache: FileCache>, - data_file_cache_refill_filter: Option>>, + recent_filter: Option>>, } impl SstableStore { @@ -141,6 +142,7 @@ impl SstableStore { high_priority_ratio: usize, data_file_cache: FileCache>, meta_file_cache: FileCache>, + recent_filter: Option>>, ) -> Self { // TODO: We should validate path early. Otherwise object store won't report invalid path // error until first write attempt. 
@@ -152,11 +154,6 @@ impl SstableStore { data_file_cache: data_file_cache.clone(), }); let meta_cache_listener = Arc::new(MetaCacheEventListener(meta_file_cache.clone())); - let data_file_cache_refill_filter = if data_file_cache.is_filter_enabled() { - Some(Arc::new(RecentFilter::new(6, Duration::from_secs(10)))) - } else { - None - }; Self { path, @@ -177,7 +174,7 @@ impl SstableStore { data_file_cache, meta_file_cache, - data_file_cache_refill_filter, + recent_filter, } } @@ -198,7 +195,7 @@ impl SstableStore { data_file_cache: FileCache::none(), meta_file_cache: FileCache::none(), - data_file_cache_refill_filter: None, + recent_filter: None, } } @@ -208,7 +205,9 @@ impl SstableStore { .delete(self.get_sst_data_path(object_id).as_str()) .await?; self.meta_cache.erase(object_id, &object_id); - self.meta_file_cache.remove_without_wait(&object_id); + self.meta_file_cache + .remove(&object_id) + .map_err(HummockError::file_cache)?; Ok(()) } @@ -228,7 +227,9 @@ impl SstableStore { // Delete from cache. 
for &object_id in object_id_list { self.meta_cache.erase(object_id, &object_id); - self.meta_file_cache.remove_without_wait(&object_id); + self.meta_file_cache + .remove(&object_id) + .map_err(HummockError::file_cache)?; } Ok(()) @@ -236,7 +237,9 @@ impl SstableStore { pub fn delete_cache(&self, object_id: HummockSstableObjectId) { self.meta_cache.erase(object_id, &object_id); - self.meta_file_cache.remove_without_wait(&object_id); + if let Err(e) = self.meta_file_cache.remove(&object_id) { + tracing::warn!("meta file cache remove error: {}", e); + } } async fn put_sst_data( @@ -259,7 +262,7 @@ impl SstableStore { stats: &mut StoreLocalStatistic, ) -> HummockResult { let object_id = sst.id; - let (block_loc, uncompressed_capacity) = sst.calculate_block_info(block_index); + let (range, uncompressed_capacity) = sst.calculate_block_info(block_index); stats.cache_data_block_total += 1; let mut fetch_block = || { @@ -268,6 +271,7 @@ impl SstableStore { let data_path = self.get_sst_data_path(object_id); let store = self.store.clone(); let use_file_cache = !matches!(policy, CachePolicy::Disable); + let range = range.clone(); async move { let key = SstableBlockIndex { @@ -283,7 +287,7 @@ impl SstableStore { return Ok(block); } - let block_data = store.read(&data_path, Some(block_loc)).await?; + let block_data = store.read(&data_path, range).await?; let block = Box::new(Block::decode(block_data, uncompressed_capacity)?); Ok(block) @@ -301,7 +305,7 @@ impl SstableStore { policy }; - if let Some(filter) = self.data_file_cache_refill_filter.as_ref() { + if let Some(filter) = self.recent_filter.as_ref() { filter.insert(object_id); } @@ -314,7 +318,7 @@ impl SstableStore { )), CachePolicy::FillFileCache => { let block = fetch_block().await?; - self.data_file_cache.insert_without_wait( + self.data_file_cache.insert_async( SstableBlockIndex { sst_id: object_id, block_idx: block_index as u64, @@ -377,24 +381,25 @@ impl SstableStore { #[cfg(any(test, feature = "test"))] pub fn 
clear_block_cache(&self) { self.block_cache.clear(); - self.data_file_cache.clear_without_wait(); + if let Err(e) = self.data_file_cache.clear() { + tracing::warn!("data file cache clear error: {}", e); + } } #[cfg(any(test, feature = "test"))] pub fn clear_meta_cache(&self) { self.meta_cache.clear(); - self.meta_file_cache.clear_without_wait(); + if let Err(e) = self.meta_file_cache.clear() { + tracing::warn!("meta file cache clear error: {}", e); + } } - /// Returns `table_holder`, `local_cache_meta_block_miss` (1 if cache miss) and - /// `local_cache_meta_block_unhit` (1 if not cache hit). - pub async fn sstable_syncable( + /// Returns `table_holder` + pub fn sstable( &self, sst: &SstableInfo, - stats: &StoreLocalStatistic, - ) -> HummockResult<(TableHolder, u64, u64)> { - let mut local_cache_meta_block_miss = 0; - let mut local_cache_meta_block_unhit = 0; + stats: &mut StoreLocalStatistic, + ) -> impl Future> + Send + 'static { let object_id = sst.get_object_id(); let lookup_response = self .meta_cache @@ -406,12 +411,8 @@ impl SstableStore { let meta_file_cache = self.meta_file_cache.clone(); let store = self.store.clone(); let meta_path = self.get_sst_data_path(object_id); - local_cache_meta_block_miss += 1; let stats_ptr = stats.remote_io_time.clone(); - let loc = BlockLocation { - offset: sst.meta_offset as usize, - size: (sst.file_size - sst.meta_offset) as usize, - }; + let range = sst.meta_offset as usize..sst.file_size as usize; async move { if let Some(sst) = meta_file_cache .lookup(&object_id) @@ -424,10 +425,11 @@ impl SstableStore { let now = Instant::now(); let buf = store - .read(&meta_path, Some(loc)) + .read(&meta_path, range) .await .map_err(HummockError::object_io_error)?; let meta = SstableMeta::decode(&buf[..])?; + let sst = Sstable::new(object_id, meta); let charge = sst.estimate_size(); let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); @@ -436,35 +438,14 @@ impl SstableStore { } }, ); - if !matches!(lookup_response, 
LookupResponse::Cached(..)) { - local_cache_meta_block_unhit += 1; + match &lookup_response { + LookupResponse::Miss(_) | LookupResponse::WaitPendingRequest(_) => { + stats.cache_meta_block_miss += 1; + } + _ => (), } - let result = lookup_response - .verbose_instrument_await("meta_cache_lookup") - .await; - result.map(|table_holder| { - ( - table_holder, - local_cache_meta_block_miss, - local_cache_meta_block_unhit, - ) - }) - } - - // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. - // TODO: remove `allow` here after the issued is closed. - #[expect(clippy::needless_pass_by_ref_mut)] - pub async fn sstable( - &self, - sst: &SstableInfo, - stats: &mut StoreLocalStatistic, - ) -> HummockResult { - self.sstable_syncable(sst, stats).await.map( - |(table_holder, local_cache_meta_block_miss, ..)| { - stats.apply_meta_fetch(local_cache_meta_block_miss); - table_holder - }, - ) + stats.cache_meta_block_total += 1; + lookup_response.verbose_instrument_await("sstable") } pub async fn list_object_metadata_from_object_store( @@ -504,7 +485,7 @@ impl SstableStore { block_index: u64, block: Box, ) { - if let Some(filter) = self.data_file_cache_refill_filter.as_ref() { + if let Some(filter) = self.recent_filter.as_ref() { filter.insert(object_id); } self.block_cache @@ -539,54 +520,13 @@ impl SstableStore { )) } - pub fn data_file_cache_refill_filter( - &self, - ) -> Option<&Arc>> { - self.data_file_cache_refill_filter.as_ref() + pub fn data_recent_filter(&self) -> Option<&Arc>> { + self.recent_filter.as_ref() } pub fn data_file_cache(&self) -> &FileCache> { &self.data_file_cache } - - pub async fn may_fill_data_file_cache( - &self, - sst: &Sstable, - block_index: usize, - stats: &mut StoreLocalStatistic, - ) -> HummockResult { - let object_id = sst.id; - let (block_loc, uncompressed_capacity) = sst.calculate_block_info(block_index); - - stats.cache_data_block_total += 1; - let fetch_block = move || { - stats.cache_data_block_miss += 1; - let 
data_path = self.get_sst_data_path(object_id); - let store = self.store.clone(); - - async move { - let data = store.read(&data_path, Some(block_loc)).await?; - let block = Block::decode(data, uncompressed_capacity)?; - let block = Box::new(block); - - Ok(block) - } - }; - - if let Some(filter) = self.data_file_cache_refill_filter.as_ref() { - filter.insert(object_id); - } - - let key = SstableBlockIndex { - sst_id: object_id, - block_idx: block_index as u64, - }; - - self.data_file_cache - .insert_with(key, fetch_block, uncompressed_capacity) - .await - .map_err(HummockError::file_cache) - } } pub type SstableStoreRef = Arc; @@ -737,6 +677,15 @@ impl SstableWriter for BatchUploadWriter { Ok(()) } + async fn write_block_bytes(&mut self, block: Bytes, meta: &BlockMeta) -> HummockResult<()> { + self.buf.extend_from_slice(&block); + if let CachePolicy::Fill(_) = self.policy { + self.block_info + .push(Block::decode(block, meta.uncompressed_size as usize)?); + } + Ok(()) + } + async fn finish(mut self, meta: SstableMeta) -> HummockResult { fail_point!("data_upload_err"); let join_handle = tokio::spawn(async move { @@ -757,7 +706,7 @@ impl SstableWriter for BatchUploadWriter { .await?; self.sstable_store.insert_meta_cache(self.object_id, meta); - if let Some(filter) = self.sstable_store.data_file_cache_refill_filter.as_ref() { + if let Some(filter) = self.sstable_store.recent_filter.as_ref() { filter.insert(self.object_id); } @@ -832,6 +781,18 @@ impl SstableWriter for StreamingUploadWriter { .map_err(HummockError::object_io_error) } + async fn write_block_bytes(&mut self, block: Bytes, meta: &BlockMeta) -> HummockResult<()> { + self.data_len += block.len(); + if let CachePolicy::Fill(_) = self.policy { + let block = Block::decode(block.clone(), meta.uncompressed_size as usize)?; + self.blocks.push(block); + } + self.object_uploader + .write_bytes(block) + .await + .map_err(HummockError::object_io_error) + } + async fn finish(mut self, meta: SstableMeta) -> 
HummockResult { let meta_data = Bytes::from(meta.encode_to_bytes()); @@ -926,7 +887,7 @@ pub struct BlockStream { /// not contain the size of blocks which precede the first streamed block. That is, if /// streaming starts at block 2 of a given SST, then the list does not contain information /// about block 0 and block 1. - block_size_vec: Vec<(usize, usize)>, + block_metas: Vec, } impl BlockStream { @@ -948,28 +909,22 @@ impl BlockStream { // Avoids panicking if `block_index` is too large. let block_index = std::cmp::min(block_index, metas.len()); - let mut block_len_vec = Vec::with_capacity(metas.len() - block_index); - metas[block_index..].iter().for_each(|b_meta| { - block_len_vec.push((b_meta.len as usize, b_meta.uncompressed_size as usize)) - }); - Self { byte_stream, block_idx: 0, - block_size_vec: block_len_vec, + block_metas: metas[block_index..].to_vec(), } } /// Reads the next block from the stream and returns it. Returns `None` if there are no blocks /// left to read. - pub async fn next(&mut self) -> HummockResult>> { - if self.block_idx >= self.block_size_vec.len() { + pub async fn next(&mut self) -> HummockResult> { + if self.block_idx >= self.block_metas.len() { return Ok(None); } - let (block_stream_size, block_full_size) = - *self.block_size_vec.get(self.block_idx).unwrap(); - let mut buffer = vec![0; block_stream_size]; + let block_meta = &self.block_metas[self.block_idx]; + let mut buffer = vec![0; block_meta.len as usize]; fail_point!("stream_read_err", |_| Err(HummockError::object_io_error( ObjectError::internal("stream read error") ))); @@ -980,17 +935,26 @@ impl BlockStream { .await .map_err(|e| HummockError::object_io_error(ObjectError::internal(e)))?; - if bytes_read != block_stream_size { + if bytes_read != block_meta.len as usize { return Err(HummockError::decode_error(ObjectError::internal(format!( "unexpected number of bytes: expected: {} read: {}", - block_stream_size, bytes_read + block_meta.len, bytes_read )))); } - let boxed_block = 
Box::new(Block::decode(Bytes::from(buffer), block_full_size)?); self.block_idx += 1; - Ok(Some(boxed_block)) + Ok(Some((Bytes::from(buffer), block_meta.clone()))) + } + + pub async fn next_block(&mut self) -> HummockResult>> { + match self.next().await? { + None => Ok(None), + Some((buf, meta)) => Ok(Some(Box::new(Block::decode( + buf, + meta.uncompressed_size as usize, + )?))), + } } } diff --git a/src/storage/src/hummock/store/hummock_storage.rs b/src/storage/src/hummock/store/hummock_storage.rs new file mode 100644 index 0000000000000..5e51fa1170b12 --- /dev/null +++ b/src/storage/src/hummock/store/hummock_storage.rs @@ -0,0 +1,579 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::future::Future; +use std::ops::{Bound, Deref}; +use std::sync::atomic::{AtomicU64, Ordering as MemOrdering}; +use std::sync::Arc; +use std::time::Duration; + +use arc_swap::ArcSwap; +use bytes::Bytes; +use itertools::Itertools; +use more_asserts::assert_gt; +use risingwave_common::catalog::TableId; +use risingwave_common_service::observer_manager::{NotificationClient, ObserverManager}; +use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; +use risingwave_hummock_sdk::HummockReadEpoch; +#[cfg(any(test, feature = "test"))] +use risingwave_pb::hummock::HummockVersion; +use risingwave_pb::hummock::{version_update_payload, SstableInfo}; +use risingwave_rpc_client::HummockMetaClient; +use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; +use tokio::sync::oneshot; +use tracing::log::error; + +use super::local_hummock_storage::{HummockStorageIterator, LocalHummockStorage}; +use super::version::{CommittedVersion, HummockVersionReader}; +use crate::error::StorageResult; +use crate::filter_key_extractor::{FilterKeyExtractorManager, RpcFilterKeyExtractorManager}; +use crate::hummock::backup_reader::{BackupReader, BackupReaderRef}; +use crate::hummock::compactor::CompactorContext; +use crate::hummock::event_handler::hummock_event_handler::BufferTracker; +use crate::hummock::event_handler::refiller::CacheRefillConfig; +use crate::hummock::event_handler::{HummockEvent, HummockEventHandler, ReadVersionMappingType}; +use crate::hummock::local_version::pinned_version::{start_pinned_version_worker, PinnedVersion}; +use crate::hummock::observer_manager::HummockObserverNode; +use crate::hummock::store::version::read_filter_for_batch; +use crate::hummock::utils::{validate_safe_epoch, wait_for_epoch}; +use crate::hummock::write_limiter::{WriteLimiter, WriteLimiterRef}; +use crate::hummock::{ + HummockEpoch, HummockError, HummockResult, MemoryLimiter, SstableObjectIdManager, + SstableObjectIdManagerRef, SstableStoreRef, +}; +use 
crate::mem_table::ImmutableMemtable; +use crate::monitor::{CompactorMetrics, HummockStateStoreMetrics, StoreLocalStatistic}; +use crate::opts::StorageOpts; +use crate::store::*; +use crate::StateStore; + +struct HummockStorageShutdownGuard { + shutdown_sender: UnboundedSender, +} + +impl Drop for HummockStorageShutdownGuard { + fn drop(&mut self) { + let _ = self + .shutdown_sender + .send(HummockEvent::Shutdown) + .inspect_err(|e| error!("unable to send shutdown: {:?}", e)); + } +} + +/// `HummockStorage` is the entry point of the Hummock state store backend. +/// It implements the `StateStore` and `StateStoreRead` traits but not the `StateStoreWrite` trait +/// since all writes should be done via `LocalHummockStorage` to ensure the single writer property +/// of hummock. `LocalHummockStorage` instance can be created via `new_local` call. +/// Hummock is the state store backend. +#[derive(Clone)] +pub struct HummockStorage { + hummock_event_sender: UnboundedSender, + + context: CompactorContext, + + filter_key_extractor_manager: FilterKeyExtractorManager, + + sstable_object_id_manager: SstableObjectIdManagerRef, + + buffer_tracker: BufferTracker, + + version_update_notifier_tx: Arc>, + + seal_epoch: Arc, + + pinned_version: Arc>, + + hummock_version_reader: HummockVersionReader, + + _shutdown_guard: Arc, + + read_version_mapping: Arc, + + backup_reader: BackupReaderRef, + + /// current_epoch < min_current_epoch cannot be read. + min_current_epoch: Arc, + + write_limiter: WriteLimiterRef, +} + +impl HummockStorage { + /// Creates a [`HummockStorage`]. 
+ #[allow(clippy::too_many_arguments)] + pub async fn new( + options: Arc, + sstable_store: SstableStoreRef, + hummock_meta_client: Arc, + notification_client: impl NotificationClient, + filter_key_extractor_manager: Arc, + state_store_metrics: Arc, + compactor_metrics: Arc, + ) -> HummockResult { + let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( + hummock_meta_client.clone(), + options.sstable_id_remote_fetch_number, + )); + let backup_reader = BackupReader::new( + &options.backup_storage_url, + &options.backup_storage_directory, + ) + .await + .map_err(HummockError::read_backup_error)?; + let write_limiter = Arc::new(WriteLimiter::default()); + let (event_tx, mut event_rx) = unbounded_channel(); + + let observer_manager = ObserverManager::new( + notification_client, + HummockObserverNode::new( + filter_key_extractor_manager.clone(), + backup_reader.clone(), + event_tx.clone(), + write_limiter.clone(), + ), + ) + .await; + observer_manager.start().await; + + let hummock_version = match event_rx.recv().await { + Some(HummockEvent::VersionUpdate(version_update_payload::Payload::PinnedVersion(version))) => version, + _ => unreachable!("the hummock observer manager is the first one to take the event tx. 
Should be full hummock version") + }; + + let (pin_version_tx, pin_version_rx) = unbounded_channel(); + let pinned_version = PinnedVersion::new(hummock_version, pin_version_tx); + tokio::spawn(start_pinned_version_worker( + pin_version_rx, + hummock_meta_client.clone(), + )); + let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( + filter_key_extractor_manager.clone(), + ); + let compactor_context = CompactorContext::new_local_compact_context( + options.clone(), + sstable_store.clone(), + compactor_metrics.clone(), + ); + + let seal_epoch = Arc::new(AtomicU64::new(pinned_version.max_committed_epoch())); + let min_current_epoch = Arc::new(AtomicU64::new(pinned_version.max_committed_epoch())); + let hummock_event_handler = HummockEventHandler::new( + event_tx.clone(), + event_rx, + pinned_version, + compactor_context.clone(), + filter_key_extractor_manager.clone(), + sstable_object_id_manager.clone(), + state_store_metrics.clone(), + CacheRefillConfig { + timeout: Duration::from_millis(options.cache_refill_timeout_ms), + data_refill_levels: options + .cache_refill_data_refill_levels + .iter() + .copied() + .collect(), + concurrency: options.cache_refill_concurrency, + unit: options.cache_refill_unit, + threshold: options.cache_refill_threshold, + }, + ); + + let instance = Self { + context: compactor_context, + filter_key_extractor_manager: filter_key_extractor_manager.clone(), + sstable_object_id_manager, + buffer_tracker: hummock_event_handler.buffer_tracker().clone(), + version_update_notifier_tx: hummock_event_handler.version_update_notifier_tx(), + seal_epoch, + hummock_event_sender: event_tx.clone(), + pinned_version: hummock_event_handler.pinned_version(), + hummock_version_reader: HummockVersionReader::new( + sstable_store, + state_store_metrics.clone(), + ), + _shutdown_guard: Arc::new(HummockStorageShutdownGuard { + shutdown_sender: event_tx, + }), + read_version_mapping: hummock_event_handler.read_version_mapping(), + 
backup_reader, + min_current_epoch, + write_limiter, + }; + + tokio::spawn(hummock_event_handler.start_hummock_event_handler_worker()); + + Ok(instance) + } + + /// Gets the value of a specified `key` in the table specified in `read_options`. + /// The result is based on a snapshot corresponding to the given `epoch`. + /// if `key` has consistent hash virtual node value, then such value is stored in `value_meta` + /// + /// If `Ok(Some())` is returned, the key is found. If `Ok(None)` is returned, + /// the key is not found. If `Err()` is returned, the searching for the key + /// failed due to other non-EOF errors. + async fn get_inner( + &self, + key: TableKey, + epoch: HummockEpoch, + read_options: ReadOptions, + ) -> StorageResult> { + let key_range = (Bound::Included(key.clone()), Bound::Included(key.clone())); + + let read_version_tuple = if read_options.read_version_from_backup { + self.build_read_version_tuple_from_backup(epoch).await? + } else { + self.build_read_version_tuple(epoch, read_options.table_id, &key_range)? + }; + + self.hummock_version_reader + .get(key, epoch, read_options, read_version_tuple) + .await + } + + async fn iter_inner( + &self, + key_range: TableKeyRange, + epoch: u64, + read_options: ReadOptions, + ) -> StorageResult> { + let read_version_tuple = if read_options.read_version_from_backup { + self.build_read_version_tuple_from_backup(epoch).await? + } else { + self.build_read_version_tuple(epoch, read_options.table_id, &key_range)? 
+ }; + + self.hummock_version_reader + .iter(key_range, epoch, read_options, read_version_tuple) + .await + } + + async fn build_read_version_tuple_from_backup( + &self, + epoch: u64, + ) -> StorageResult<(Vec, Vec, CommittedVersion)> { + match self.backup_reader.try_get_hummock_version(epoch).await { + Ok(Some(backup_version)) => { + validate_safe_epoch(backup_version.safe_epoch(), epoch)?; + Ok((Vec::default(), Vec::default(), backup_version)) + } + Ok(None) => Err(HummockError::read_backup_error(format!( + "backup include epoch {} not found", + epoch + )) + .into()), + Err(e) => Err(e), + } + } + + fn build_read_version_tuple( + &self, + epoch: u64, + table_id: TableId, + key_range: &TableKeyRange, + ) -> StorageResult<(Vec, Vec, CommittedVersion)> { + let pinned_version = self.pinned_version.load(); + validate_safe_epoch(pinned_version.safe_epoch(), epoch)?; + + // check epoch if lower mce + let read_version_tuple: (Vec, Vec, CommittedVersion) = + if epoch <= pinned_version.max_committed_epoch() { + // read committed_version directly without build snapshot + (Vec::default(), Vec::default(), (**pinned_version).clone()) + } else { + let read_version_vec = { + let read_guard = self.read_version_mapping.read(); + read_guard + .get(&table_id) + .map(|v| { + v.values() + .filter(|v| !v.read_arc().is_replicated()) + .cloned() + .collect_vec() + }) + .unwrap_or_default() + }; + + // When the system has just started and no state has been created, the memory state + // may be empty + if read_version_vec.is_empty() { + (Vec::default(), Vec::default(), (**pinned_version).clone()) + } else { + let (imm_vec, sst_vec) = + read_filter_for_batch(epoch, table_id, key_range, read_version_vec)?; + let committed_version = (**pinned_version).clone(); + + (imm_vec, sst_vec, committed_version) + } + }; + + Ok(read_version_tuple) + } + + async fn new_local_inner(&self, option: NewLocalOptions) -> LocalHummockStorage { + let (tx, rx) = tokio::sync::oneshot::channel(); + 
self.hummock_event_sender + .send(HummockEvent::RegisterReadVersion { + table_id: option.table_id, + new_read_version_sender: tx, + is_replicated: option.is_replicated, + }) + .unwrap(); + + let (basic_read_version, instance_guard) = rx.await.unwrap(); + let version_update_notifier_tx = self.version_update_notifier_tx.clone(); + LocalHummockStorage::new( + instance_guard, + basic_read_version, + self.hummock_version_reader.clone(), + self.hummock_event_sender.clone(), + self.buffer_tracker.get_memory_limiter().clone(), + self.write_limiter.clone(), + option, + version_update_notifier_tx, + ) + } + + pub fn sstable_store(&self) -> SstableStoreRef { + self.context.sstable_store.clone() + } + + pub fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef { + &self.sstable_object_id_manager + } + + pub fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManager { + &self.filter_key_extractor_manager + } + + pub fn get_memory_limiter(&self) -> Arc { + self.buffer_tracker.get_memory_limiter().clone() + } + + pub fn get_pinned_version(&self) -> PinnedVersion { + self.pinned_version.load().deref().deref().clone() + } + + pub fn backup_reader(&self) -> BackupReaderRef { + self.backup_reader.clone() + } +} + +impl StateStoreRead for HummockStorage { + type IterStream = StreamTypeOfIter; + + fn get( + &self, + key: TableKey, + epoch: u64, + read_options: ReadOptions, + ) -> impl Future>> + '_ { + self.get_inner(key, epoch, read_options) + } + + fn iter( + &self, + key_range: TableKeyRange, + epoch: u64, + read_options: ReadOptions, + ) -> impl Future> + '_ { + self.iter_inner(key_range, epoch, read_options) + } +} + +impl StateStore for HummockStorage { + type Local = LocalHummockStorage; + + /// Waits until the local hummock version contains the epoch. If `wait_epoch` is `Current`, + /// we will only check whether it is le `sealed_epoch` and won't wait. 
+ async fn try_wait_epoch(&self, wait_epoch: HummockReadEpoch) -> StorageResult<()> { + self.validate_read_epoch(wait_epoch)?; + let wait_epoch = match wait_epoch { + HummockReadEpoch::Committed(epoch) => { + assert_ne!(epoch, HummockEpoch::MAX, "epoch should not be u64::MAX"); + epoch + } + _ => return Ok(()), + }; + wait_for_epoch(&self.version_update_notifier_tx, wait_epoch).await + } + + async fn sync(&self, epoch: u64) -> StorageResult { + let (tx, rx) = oneshot::channel(); + self.hummock_event_sender + .send(HummockEvent::AwaitSyncEpoch { + new_sync_epoch: epoch, + sync_result_sender: tx, + }) + .expect("should send success"); + Ok(rx.await.expect("should wait success")?) + } + + fn seal_epoch(&self, epoch: u64, is_checkpoint: bool) { + // Update `seal_epoch` synchronously, + // as `HummockEvent::SealEpoch` is handled asynchronously. + let prev_epoch = self.seal_epoch.swap(epoch, MemOrdering::SeqCst); + assert_gt!(epoch, prev_epoch); + + if is_checkpoint { + let _ = self.min_current_epoch.compare_exchange( + HummockEpoch::MAX, + epoch, + MemOrdering::SeqCst, + MemOrdering::SeqCst, + ); + } + self.hummock_event_sender + .send(HummockEvent::SealEpoch { + epoch, + is_checkpoint, + }) + .expect("should send success"); + StoreLocalStatistic::flush_all(); + } + + async fn clear_shared_buffer(&self) -> StorageResult<()> { + let (tx, rx) = oneshot::channel(); + self.hummock_event_sender + .send(HummockEvent::Clear(tx)) + .expect("should send success"); + rx.await.expect("should wait success"); + + let epoch = self.pinned_version.load().max_committed_epoch(); + self.min_current_epoch + .store(HummockEpoch::MAX, MemOrdering::SeqCst); + self.seal_epoch.store(epoch, MemOrdering::SeqCst); + + Ok(()) + } + + fn new_local(&self, option: NewLocalOptions) -> impl Future + Send + '_ { + self.new_local_inner(option) + } + + fn validate_read_epoch(&self, epoch: HummockReadEpoch) -> StorageResult<()> { + if let HummockReadEpoch::Current(read_current_epoch) = epoch { + assert_ne!( 
+ read_current_epoch, + HummockEpoch::MAX, + "epoch should not be u64::MAX" + ); + let sealed_epoch = self.seal_epoch.load(MemOrdering::SeqCst); + if read_current_epoch > sealed_epoch { + tracing::warn!( + "invalid barrier read {} > max seal epoch {}", + read_current_epoch, + sealed_epoch + ); + return Err(HummockError::read_current_epoch().into()); + } + + let min_current_epoch = self.min_current_epoch.load(MemOrdering::SeqCst); + if read_current_epoch < min_current_epoch { + tracing::warn!( + "invalid barrier read {} < min current epoch {}", + read_current_epoch, + min_current_epoch + ); + return Err(HummockError::read_current_epoch().into()); + } + } + Ok(()) + } +} + +#[cfg(any(test, feature = "test"))] +impl HummockStorage { + pub async fn seal_and_sync_epoch(&self, epoch: u64) -> StorageResult { + self.seal_epoch(epoch, true); + self.sync(epoch).await + } + + /// Used in the compaction test tool + pub async fn update_version_and_wait(&self, version: HummockVersion) { + use tokio::task::yield_now; + let version_id = version.id; + self.hummock_event_sender + .send(HummockEvent::VersionUpdate( + version_update_payload::Payload::PinnedVersion(version), + )) + .unwrap(); + loop { + if self.pinned_version.load().id() >= version_id { + break; + } + + yield_now().await + } + } + + pub async fn wait_version(&self, version: HummockVersion) { + use tokio::task::yield_now; + loop { + if self.pinned_version.load().id() >= version.id { + break; + } + + yield_now().await + } + } + + pub fn get_shared_buffer_size(&self) -> usize { + self.buffer_tracker.get_buffer_size() + } + + pub async fn try_wait_epoch_for_test(&self, wait_epoch: u64) { + let mut rx = self.version_update_notifier_tx.subscribe(); + while *(rx.borrow_and_update()) < wait_epoch { + rx.changed().await.unwrap(); + } + } + + /// Creates a [`HummockStorage`] with default stats. Should only be used by tests. 
+ pub async fn for_test( + options: Arc, + sstable_store: SstableStoreRef, + hummock_meta_client: Arc, + notification_client: impl NotificationClient, + ) -> HummockResult { + Self::new( + options, + sstable_store, + hummock_meta_client, + notification_client, + Arc::new(RpcFilterKeyExtractorManager::default()), + Arc::new(HummockStateStoreMetrics::unused()), + Arc::new(CompactorMetrics::unused()), + ) + .await + } + + pub fn storage_opts(&self) -> &Arc { + &self.context.storage_opts + } + + pub fn version_reader(&self) -> &HummockVersionReader { + &self.hummock_version_reader + } + + pub async fn wait_version_update(&self, old_id: u64) -> u64 { + use tokio::task::yield_now; + loop { + let cur_id = self.pinned_version.load().id(); + if cur_id > old_id { + return cur_id; + } + yield_now().await; + } + } +} diff --git a/src/storage/src/hummock/store/state_store.rs b/src/storage/src/hummock/store/local_hummock_storage.rs similarity index 88% rename from src/storage/src/hummock/store/state_store.rs rename to src/storage/src/hummock/store/local_hummock_storage.rs index f5a6f65b8d6fc..94b536bf6919f 100644 --- a/src/storage/src/hummock/store/state_store.rs +++ b/src/storage/src/hummock/store/local_hummock_storage.rs @@ -19,8 +19,9 @@ use std::sync::Arc; use await_tree::InstrumentAwait; use bytes::Bytes; use parking_lot::RwLock; +use prometheus::IntGauge; use risingwave_common::catalog::{TableId, TableOption}; -use risingwave_hummock_sdk::key::{map_table_key_range, TableKey, TableKeyRange}; +use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockEpoch; use tokio::sync::mpsc; use tracing::{warn, Instrument}; @@ -48,6 +49,8 @@ use crate::storage_value::StorageValue; use crate::store::*; use crate::StateStoreIter; +/// `LocalHummockStorage` is a handle for a state table shard to access data from and write data to +/// the hummock state backend. It is created via `HummockStorage::new_local`. 
pub struct LocalHummockStorage { mem_table: MemTable, @@ -86,6 +89,10 @@ pub struct LocalHummockStorage { write_limiter: WriteLimiterRef, version_update_notifier_tx: Arc>, + + mem_table_size: IntGauge, + + mem_table_item_count: IntGauge, } impl LocalHummockStorage { @@ -141,24 +148,22 @@ impl LocalHummockStorage { pub async fn may_exist_inner( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult { if self.mem_table.iter(key_range.clone()).next().is_some() { return Ok(true); } - let table_key_range = map_table_key_range(key_range); - let read_snapshot = read_filter_for_local( HummockEpoch::MAX, // Use MAX epoch to make sure we read from latest read_options.table_id, - &table_key_range, + &key_range, self.read_version.clone(), )?; self.hummock_version_reader - .may_exist(table_key_range, read_options, read_snapshot) + .may_exist(key_range, read_options, read_snapshot) .await } } @@ -168,22 +173,22 @@ impl StateStoreRead for LocalHummockStorage { fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> impl Future>> + '_ { assert!(epoch <= self.epoch()); - self.get_inner(TableKey(key), epoch, read_options) + self.get_inner(key, epoch, read_options) } fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + '_ { assert!(epoch <= self.epoch()); - self.iter_inner(map_table_key_range(key_range), epoch, read_options) + self.iter_inner(key_range, epoch, read_options) .instrument(tracing::trace_span!("hummock_iter")) } } @@ -193,18 +198,19 @@ impl LocalStateStore for LocalHummockStorage { fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future> + Send + '_ { self.may_exist_inner(key_range, read_options) } - async fn get(&self, key: Bytes, read_options: ReadOptions) -> StorageResult> { + async fn get( + &self, + key: TableKey, + read_options: ReadOptions, 
+ ) -> StorageResult> { match self.mem_table.buffer.get(&key) { - None => { - self.get_inner(TableKey(key), self.epoch(), read_options) - .await - } + None => self.get_inner(key, self.epoch(), read_options).await, Some(op) => match op { KeyOp::Insert(value) | KeyOp::Update((_, value)) => Ok(Some(value.clone())), KeyOp::Delete(_) => Ok(None), @@ -215,19 +221,13 @@ impl LocalStateStore for LocalHummockStorage { #[allow(clippy::manual_async_fn)] fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { async move { let stream = self - .iter_inner( - map_table_key_range(key_range.clone()), - self.epoch(), - read_options, - ) + .iter_inner(key_range.clone(), self.epoch(), read_options) .await?; - let (l, r) = key_range; - let key_range = (l.map(Bytes::from), r.map(Bytes::from)); Ok(merge_stream( self.mem_table.iter(key_range), stream, @@ -237,16 +237,30 @@ impl LocalStateStore for LocalHummockStorage { } } - fn insert(&mut self, key: Bytes, new_val: Bytes, old_val: Option) -> StorageResult<()> { + fn insert( + &mut self, + key: TableKey, + new_val: Bytes, + old_val: Option, + ) -> StorageResult<()> { match old_val { None => self.mem_table.insert(key, new_val)?, Some(old_val) => self.mem_table.update(key, old_val, new_val)?, }; + + self.mem_table_size + .set(self.mem_table.kv_size.size() as i64); + self.mem_table_item_count + .set(self.mem_table.buffer.len() as i64); Ok(()) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { self.mem_table.delete(key, old_val)?; + self.mem_table_size + .set(self.mem_table.kv_size.size() as i64); + self.mem_table_item_count + .set(self.mem_table.buffer.len() as i64); Ok(()) } @@ -254,6 +268,8 @@ impl LocalStateStore for LocalHummockStorage { &mut self, delete_ranges: Vec<(Bound, Bound)>, ) -> StorageResult { + self.mem_table_size.set(0); + 
self.mem_table_item_count.set(0); debug_assert!(delete_ranges .iter() .map(|(key, _)| key) @@ -360,7 +376,7 @@ impl LocalStateStore for LocalHummockStorage { impl LocalHummockStorage { async fn flush_inner( &mut self, - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> StorageResult { @@ -454,6 +470,14 @@ impl LocalHummockStorage { version_update_notifier_tx: Arc>, ) -> Self { let stats = hummock_version_reader.stats().clone(); + let mem_table_size = stats.mem_table_memory_size.with_label_values(&[ + &option.table_id.to_string(), + &instance_guard.instance_id.to_string(), + ]); + let mem_table_item_count = stats.mem_table_item_count.with_label_values(&[ + &option.table_id.to_string(), + &instance_guard.instance_id.to_string(), + ]); Self { mem_table: MemTable::new(option.is_consistent_op), epoch: None, @@ -469,6 +493,8 @@ impl LocalHummockStorage { stats, write_limiter, version_update_notifier_tx, + mem_table_size, + mem_table_item_count, } } diff --git a/src/storage/src/hummock/store/mod.rs b/src/storage/src/hummock/store/mod.rs index 1f74b4f004e5c..0831ff35d2dea 100644 --- a/src/storage/src/hummock/store/mod.rs +++ b/src/storage/src/hummock/store/mod.rs @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-pub mod memtable; -pub mod state_store; +pub mod hummock_storage; +pub mod local_hummock_storage; pub mod version; + +pub use hummock_storage::*; +pub use local_hummock_storage::*; diff --git a/src/storage/src/hummock/store/version.rs b/src/storage/src/hummock/store/version.rs index 38f7cb4e54e3f..f2d5eca81b171 100644 --- a/src/storage/src/hummock/store/version.rs +++ b/src/storage/src/hummock/store/version.rs @@ -18,6 +18,7 @@ use std::collections::HashSet; use std::iter::once; use std::sync::Arc; +use await_tree::InstrumentAwait; use bytes::Bytes; use itertools::Itertools; use parking_lot::RwLock; @@ -31,8 +32,7 @@ use risingwave_pb::hummock::{HummockVersionDelta, LevelType, SstableInfo}; use sync_point::sync_point; use tracing::Instrument; -use super::memtable::{ImmId, ImmutableMemtable}; -use super::state_store::StagingDataIterator; +use super::StagingDataIterator; use crate::error::StorageResult; use crate::hummock::iterator::{ ConcatIterator, ForwardMergeRangeIterator, HummockIteratorUnion, OrderedMergeIteratorInner, @@ -41,7 +41,7 @@ use crate::hummock::iterator::{ use crate::hummock::local_version::pinned_version::PinnedVersion; use crate::hummock::sstable::SstableIteratorReadOptions; use crate::hummock::sstable_store::SstableStoreRef; -use crate::hummock::store::state_store::HummockStorageIterator; +use crate::hummock::store::HummockStorageIterator; use crate::hummock::utils::{ check_subset_preserve_order, filter_single_sst, prune_nonoverlapping_ssts, prune_overlapping_ssts, range_overlap, search_sst_idx, @@ -50,6 +50,7 @@ use crate::hummock::{ get_from_batch, get_from_sstable_info, hit_sstable_bloom_filter, Sstable, SstableDeleteRangeIterator, SstableIterator, }; +use crate::mem_table::{ImmId, ImmutableMemtable}; use crate::monitor::{ GetLocalMetricsGuard, HummockStateStoreMetrics, MayExistLocalMetricsGuard, StoreLocalStatistic, }; @@ -112,7 +113,6 @@ impl StagingSstableInfo { #[derive(Clone)] pub enum StagingData { - // ImmMem(Arc), 
ImmMem(ImmutableMemtable), MergedImmMem(ImmutableMemtable), Sst(StagingSstableInfo), @@ -925,7 +925,7 @@ impl HummockVersionReader { ); user_iter .rewind() - .instrument(tracing::trace_span!("rewind")) + .verbose_instrument_await("rewind") .await?; local_stats.found_key = user_iter.is_valid(); local_stats.sub_iter_count = local_stats.staging_imm_iter_count diff --git a/src/storage/src/hummock/test_utils.rs b/src/storage/src/hummock/test_utils.rs index cded494d128a3..424ca7d1a2131 100644 --- a/src/storage/src/hummock/test_utils.rs +++ b/src/storage/src/hummock/test_utils.rs @@ -21,7 +21,7 @@ use itertools::Itertools; use risingwave_common::catalog::TableId; use risingwave_common::hash::VirtualNode; use risingwave_common::must_match; -use risingwave_hummock_sdk::key::{FullKey, PointRange, UserKey}; +use risingwave_hummock_sdk::key::{FullKey, PointRange, TableKey, UserKey}; use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId}; use risingwave_pb::hummock::{KeyRange, SstableInfo}; @@ -35,8 +35,8 @@ use crate::filter_key_extractor::{FilterKeyExtractorImpl, FullKeyFilterKeyExtrac use crate::hummock::shared_buffer::shared_buffer_batch::SharedBufferBatch; use crate::hummock::value::HummockValue; use crate::hummock::{ - CachePolicy, DeleteRangeTombstone, FilterBuilder, LruCache, Sstable, SstableBuilder, - SstableBuilderOptions, SstableStoreRef, SstableWriter, Xor16FilterBuilder, + BlockedXor16FilterBuilder, CachePolicy, DeleteRangeTombstone, FilterBuilder, LruCache, Sstable, + SstableBuilder, SstableBuilderOptions, SstableStoreRef, SstableWriter, Xor16FilterBuilder, }; use crate::monitor::StoreLocalStatistic; use crate::opts::StorageOpts; @@ -63,19 +63,19 @@ pub fn default_opts_for_test() -> StorageOpts { } } -pub fn gen_dummy_batch(n: u64) -> Vec<(Bytes, StorageValue)> { +pub fn gen_dummy_batch(n: u64) -> Vec<(TableKey, StorageValue)> { vec![( - Bytes::from(iterator_test_table_key_of(n as usize)), + TableKey(Bytes::from(iterator_test_table_key_of(n as 
usize))), StorageValue::new_put(b"value1".to_vec()), )] } -pub fn gen_dummy_batch_several_keys(n: usize) -> Vec<(Bytes, StorageValue)> { +pub fn gen_dummy_batch_several_keys(n: usize) -> Vec<(TableKey, StorageValue)> { let mut kvs = vec![]; let v = Bytes::from(b"value1".to_vec().repeat(100)); for idx in 0..n { kvs.push(( - Bytes::from(iterator_test_table_key_of(idx)), + TableKey(Bytes::from(iterator_test_table_key_of(idx))), StorageValue::new_put(v.clone()), )); } @@ -150,7 +150,7 @@ pub async fn gen_test_sstable_data( ) -> (Bytes, SstableMeta) { let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opts), opts); for (key, value) in kv_iter { - b.add_for_test(key.to_ref(), value.as_slice(), true) + b.add_for_test(key.to_ref(), value.as_slice()) .await .unwrap(); } @@ -200,11 +200,11 @@ pub async fn put_sst( pub async fn gen_test_sstable_impl + Clone + Default + Eq, F: FilterBuilder>( opts: SstableBuilderOptions, object_id: HummockSstableObjectId, - kv_iter: impl Iterator, HummockValue)>, + kv_iter: impl IntoIterator, HummockValue)>, range_tombstones: Vec, sstable_store: SstableStoreRef, policy: CachePolicy, -) -> (Sstable, SstableInfo) { +) -> SstableInfo { let writer_opts = SstableWriterOptions { capacity_hint: None, tracker: None, @@ -225,7 +225,7 @@ pub async fn gen_test_sstable_impl + Clone + Default + Eq, F: Fil let mut last_key = FullKey::::default(); let mut user_key_last_delete = HummockEpoch::MAX; for (mut key, value) in kv_iter { - let mut is_new_user_key = + let is_new_user_key = last_key.is_empty() || key.user_key.as_ref() != last_key.user_key.as_ref(); let epoch = key.epoch; if is_new_user_key { @@ -254,28 +254,16 @@ pub async fn gen_test_sstable_impl + Clone + Default + Eq, F: Fil user_key_last_delete = earliest_delete_epoch; key.epoch = earliest_delete_epoch; - b.add(key.to_ref(), HummockValue::Delete, is_new_user_key) - .await - .unwrap(); + b.add(key.to_ref(), HummockValue::Delete).await.unwrap(); key.epoch = epoch; - is_new_user_key = false; } 
- b.add(key.to_ref(), value.as_slice(), is_new_user_key) - .await - .unwrap(); + b.add(key.to_ref(), value.as_slice()).await.unwrap(); } b.add_monotonic_deletes(create_monotonic_events(range_tombstones)); let output = b.finish().await.unwrap(); output.writer_output.await.unwrap().unwrap(); - let table = sstable_store - .sstable( - &output.sst_info.sst_info, - &mut StoreLocalStatistic::default(), - ) - .await - .unwrap(); - (table.value().as_ref().clone(), output.sst_info.sst_info) + output.sst_info.sst_info } /// Generate a test table from the given `kv_iter` and put the kv value to `sstable_store` @@ -285,26 +273,30 @@ pub async fn gen_test_sstable + Clone + Default + Eq>( kv_iter: impl Iterator, HummockValue)>, sstable_store: SstableStoreRef, ) -> Sstable { - gen_test_sstable_impl::<_, Xor16FilterBuilder>( + let sst_info = gen_test_sstable_impl::<_, Xor16FilterBuilder>( opts, object_id, kv_iter, vec![], - sstable_store, + sstable_store.clone(), CachePolicy::NotFill, ) - .await - .0 + .await; + let table = sstable_store + .sstable(&sst_info, &mut StoreLocalStatistic::default()) + .await + .unwrap(); + table.value().as_ref().clone() } /// Generate a test table from the given `kv_iter` and put the kv value to `sstable_store` -pub async fn gen_test_sstable_and_info + Clone + Default + Eq>( +pub async fn gen_test_sstable_info + Clone + Default + Eq>( opts: SstableBuilderOptions, object_id: HummockSstableObjectId, - kv_iter: impl Iterator, HummockValue)>, + kv_iter: impl IntoIterator, HummockValue)>, sstable_store: SstableStoreRef, -) -> (Sstable, SstableInfo) { - gen_test_sstable_impl::<_, Xor16FilterBuilder>( +) -> SstableInfo { + gen_test_sstable_impl::<_, BlockedXor16FilterBuilder>( opts, object_id, kv_iter, @@ -323,16 +315,20 @@ pub async fn gen_test_sstable_with_range_tombstone( range_tombstones: Vec, sstable_store: SstableStoreRef, ) -> Sstable { - gen_test_sstable_impl::<_, Xor16FilterBuilder>( + let sst_info = gen_test_sstable_impl::<_, Xor16FilterBuilder>( 
opts, object_id, kv_iter, range_tombstones, - sstable_store, + sstable_store.clone(), CachePolicy::NotFill, ) - .await - .0 + .await; + let table = sstable_store + .sstable(&sst_info, &mut StoreLocalStatistic::default()) + .await + .unwrap(); + table.value().as_ref().clone() } /// Generates a user key with table id 0 and the given `table_key` diff --git a/src/storage/src/hummock/utils.rs b/src/storage/src/hummock/utils.rs index d24ff4ab09ec9..7ccb3fbf04790 100644 --- a/src/storage/src/hummock/utils.rs +++ b/src/storage/src/hummock/utils.rs @@ -372,7 +372,7 @@ pub(crate) const ENABLE_SANITY_CHECK: bool = cfg!(debug_assertions); /// Make sure the key to insert should not exist in storage. pub(crate) async fn do_insert_sanity_check( - key: Bytes, + key: TableKey, value: Bytes, inner: &impl StateStoreRead, epoch: u64, @@ -400,7 +400,7 @@ pub(crate) async fn do_insert_sanity_check( /// Make sure that the key to delete should exist in storage and the value should be matched. pub(crate) async fn do_delete_sanity_check( - key: Bytes, + key: TableKey, old_value: Bytes, inner: &impl StateStoreRead, epoch: u64, @@ -437,7 +437,7 @@ pub(crate) async fn do_delete_sanity_check( /// Make sure that the key to update should exist in storage and the value should be matched pub(crate) async fn do_update_sanity_check( - key: Bytes, + key: TableKey, old_value: Bytes, new_value: Bytes, inner: &impl StateStoreRead, @@ -497,9 +497,9 @@ fn validate_delete_range(left: &Bound, right: &Bound) -> bool { } pub(crate) fn filter_with_delete_range<'a>( - kv_iter: impl Iterator + 'a, + kv_iter: impl Iterator, KeyOp)> + 'a, mut delete_ranges_iter: impl Iterator, Bound)> + 'a, -) -> impl Iterator + 'a { +) -> impl Iterator, KeyOp)> + 'a { let mut range = delete_ranges_iter.next(); if let Some((range_start, range_end)) = range { assert!( @@ -511,10 +511,11 @@ pub(crate) fn filter_with_delete_range<'a>( } kv_iter.filter(move |(ref key, _)| { if let Some(range_bound) = range { - if 
cmp_delete_range_left_bounds(Included(key), range_bound.0.as_ref()) == Ordering::Less + if cmp_delete_range_left_bounds(Included(&key.0), range_bound.0.as_ref()) + == Ordering::Less { true - } else if range_bound.contains(key) { + } else if range_bound.contains(key.as_ref()) { false } else { // Key has exceeded the current key range. Advance to the next range. @@ -532,7 +533,7 @@ pub(crate) fn filter_with_delete_range<'a>( { // Not fall in the next delete range break true; - } else if range_bound.contains(key) { + } else if range_bound.contains(key.as_ref()) { // Fall in the next delete range break false; } else { diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index 5ebcb4fe78b00..0f2f155f6a903 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -14,12 +14,11 @@ #![feature(allocator_api)] #![feature(arc_unwrap_or_clone)] -#![feature(binary_heap_drain_sorted)] #![feature(bound_as_ref)] #![feature(bound_map)] #![feature(custom_test_frameworks)] #![feature(extract_if)] -#![feature(generators)] +#![feature(coroutines)] #![feature(hash_extract_if)] #![feature(lint_reasons)] #![feature(proc_macro_hygiene)] @@ -36,15 +35,13 @@ #![feature(btree_extract_if)] #![feature(exact_size_is_empty)] #![feature(lazy_cell)] -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] #![recursion_limit = "256"] #![feature(error_generic_member_access)] #![feature(let_chains)] #![feature(associated_type_bounds)] #![feature(exclusive_range_pattern)] #![feature(impl_trait_in_assoc_type)] -#![feature(async_fn_in_trait)] -#![feature(return_position_impl_trait_in_trait)] pub mod hummock; pub mod memory; @@ -58,7 +55,6 @@ pub mod error; pub mod opts; pub mod store_impl; pub mod table; -pub mod write_batch; pub mod filter_key_extractor; pub mod mem_table; diff --git a/src/storage/src/mem_table.rs b/src/storage/src/mem_table.rs index 1ff090690aa4a..34508a730a5fb 100644 --- a/src/storage/src/mem_table.rs +++ 
b/src/storage/src/mem_table.rs @@ -23,10 +23,11 @@ use futures::{pin_mut, StreamExt}; use futures_async_stream::try_stream; use risingwave_common::catalog::{TableId, TableOption}; use risingwave_common::estimate_size::{EstimateSize, KvSize}; -use risingwave_hummock_sdk::key::{FullKey, TableKey}; +use risingwave_hummock_sdk::key::{FullKey, TableKey, TableKeyRange}; use thiserror::Error; use crate::error::{StorageError, StorageResult}; +use crate::hummock::shared_buffer::shared_buffer_batch::{SharedBufferBatch, SharedBufferBatchId}; use crate::hummock::utils::{ cmp_delete_range_left_bounds, do_delete_sanity_check, do_insert_sanity_check, do_update_sanity_check, filter_with_delete_range, ENABLE_SANITY_CHECK, @@ -35,6 +36,10 @@ use crate::row_serde::value_serde::ValueRowSerde; use crate::storage_value::StorageValue; use crate::store::*; +pub type ImmutableMemtable = SharedBufferBatch; + +pub type ImmId = SharedBufferBatchId; + #[derive(Clone, Debug, EstimateSize)] pub enum KeyOp { Insert(Bytes), @@ -46,7 +51,7 @@ pub enum KeyOp { /// `MemTable` is a buffer for modify operations without encoding #[derive(Clone)] pub struct MemTable { - pub(crate) buffer: BTreeMap, + pub(crate) buffer: BTreeMap, KeyOp>, pub(crate) is_consistent_op: bool, pub(crate) kv_size: KvSize, } @@ -54,7 +59,11 @@ pub struct MemTable { #[derive(Error, Debug)] pub enum MemTableError { #[error("Inconsistent operation")] - InconsistentOperation { key: Bytes, prev: KeyOp, new: KeyOp }, + InconsistentOperation { + key: TableKey, + prev: KeyOp, + new: KeyOp, + }, } type Result = std::result::Result>; @@ -77,13 +86,8 @@ impl MemTable { !self.buffer.is_empty() } - /// read methods - pub fn get_key_op(&self, pk: &[u8]) -> Option<&KeyOp> { - self.buffer.get(pk) - } - /// write methods - pub fn insert(&mut self, pk: Bytes, value: Bytes) -> Result<()> { + pub fn insert(&mut self, pk: TableKey, value: Bytes) -> Result<()> { if !self.is_consistent_op { let key_len = std::mem::size_of::() + pk.len(); let 
insert_value = KeyOp::Insert(value); @@ -125,7 +129,7 @@ impl MemTable { } } - pub fn delete(&mut self, pk: Bytes, old_value: Bytes) -> Result<()> { + pub fn delete(&mut self, pk: TableKey, old_value: Bytes) -> Result<()> { let key_len = std::mem::size_of::() + pk.len(); if !self.is_consistent_op { let delete_value = KeyOp::Delete(old_value); @@ -185,7 +189,12 @@ impl MemTable { } } - pub fn update(&mut self, pk: Bytes, old_value: Bytes, new_value: Bytes) -> Result<()> { + pub fn update( + &mut self, + pk: TableKey, + old_value: Bytes, + new_value: Bytes, + ) -> Result<()> { if !self.is_consistent_op { let key_len = std::mem::size_of::() + pk.len(); @@ -245,13 +254,16 @@ impl MemTable { } } - pub fn into_parts(self) -> BTreeMap { + pub fn into_parts(self) -> BTreeMap, KeyOp> { self.buffer } - pub fn iter<'a, R>(&'a self, key_range: R) -> impl Iterator + pub fn iter<'a, R>( + &'a self, + key_range: R, + ) -> impl Iterator, &'a KeyOp)> where - R: RangeBounds + 'a, + R: RangeBounds> + 'a, { self.buffer.range(key_range) } @@ -291,7 +303,7 @@ impl KeyOp { #[try_stream(ok = StateStoreIterItem, error = StorageError)] pub(crate) async fn merge_stream<'a>( - mem_table_iter: impl Iterator + 'a, + mem_table_iter: impl Iterator, &'a KeyOp)> + 'a, inner_stream: impl StateStoreReadIterStream, table_id: TableId, epoch: u64, @@ -314,17 +326,14 @@ pub(crate) async fn merge_stream<'a>( let (key, key_op) = mem_table_iter.next().unwrap(); match key_op { KeyOp::Insert(value) | KeyOp::Update((_, value)) => { - yield ( - FullKey::new(table_id, TableKey(key.clone()), epoch), - value.clone(), - ) + yield (FullKey::new(table_id, key.clone(), epoch), value.clone()) } _ => {} } } (Some(Ok((inner_key, _))), Some((mem_table_key, _))) => { debug_assert_eq!(inner_key.user_key.table_id, table_id); - match inner_key.user_key.table_key.0.cmp(mem_table_key) { + match inner_key.user_key.table_key.cmp(mem_table_key) { Ordering::Less => { // yield data from storage let (key, value) = 
inner_stream.next().await.unwrap()?; @@ -354,10 +363,7 @@ pub(crate) async fn merge_stream<'a>( match key_op { KeyOp::Insert(value) => { - yield ( - FullKey::new(table_id, TableKey(key.clone()), epoch), - value.clone(), - ); + yield (FullKey::new(table_id, key.clone(), epoch), value.clone()); } KeyOp::Delete(_) => {} KeyOp::Update(_) => unreachable!( @@ -409,13 +415,17 @@ impl LocalStateStore for MemtableLocalState #[allow(clippy::unused_async)] async fn may_exist( &self, - _key_range: IterKeyRange, + _key_range: TableKeyRange, _read_options: ReadOptions, ) -> StorageResult { Ok(true) } - async fn get(&self, key: Bytes, read_options: ReadOptions) -> StorageResult> { + async fn get( + &self, + key: TableKey, + read_options: ReadOptions, + ) -> StorageResult> { match self.mem_table.buffer.get(&key) { None => self.inner.get(key, self.epoch(), read_options).await, Some(op) => match op { @@ -428,7 +438,7 @@ impl LocalStateStore for MemtableLocalState #[allow(clippy::manual_async_fn)] fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { async move { @@ -436,8 +446,6 @@ impl LocalStateStore for MemtableLocalState .inner .iter(key_range.clone(), self.epoch(), read_options) .await?; - let (l, r) = key_range; - let key_range = (l.map(Bytes::from), r.map(Bytes::from)); Ok(merge_stream( self.mem_table.iter(key_range), stream, @@ -447,7 +455,12 @@ impl LocalStateStore for MemtableLocalState } } - fn insert(&mut self, key: Bytes, new_val: Bytes, old_val: Option) -> StorageResult<()> { + fn insert( + &mut self, + key: TableKey, + new_val: Bytes, + old_val: Option, + ) -> StorageResult<()> { match old_val { None => self.mem_table.insert(key, new_val)?, Some(old_val) => self.mem_table.update(key, old_val, new_val)?, @@ -455,7 +468,7 @@ impl LocalStateStore for MemtableLocalState Ok(()) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, 
old_val: Bytes) -> StorageResult<()> { Ok(self.mem_table.delete(key, old_val)?) } @@ -567,6 +580,7 @@ impl LocalStateStore for MemtableLocalState #[cfg(test)] mod tests { use bytes::Bytes; + use risingwave_hummock_sdk::key::TableKey; use crate::mem_table::{KeyOp, MemTable}; @@ -575,7 +589,9 @@ mod tests { let mut mem_table = MemTable::new(true); assert_eq!(mem_table.kv_size.size(), 0); - mem_table.insert("key1".into(), "value1".into()).unwrap(); + mem_table + .insert(TableKey("key1".into()), "value1".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -587,7 +603,9 @@ mod tests { // delete mem_table.drain(); assert_eq!(mem_table.kv_size.size(), 0); - mem_table.delete("key2".into(), "value2".into()).unwrap(); + mem_table + .delete(TableKey("key2".into()), "value2".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -595,7 +613,9 @@ mod tests { + std::mem::size_of::() + Bytes::from("value2").len() ); - mem_table.insert("key2".into(), "value22".into()).unwrap(); + mem_table + .insert(TableKey("key2".into()), "value22".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -605,7 +625,9 @@ mod tests { + Bytes::from("value2").len() ); - mem_table.delete("key2".into(), "value22".into()).unwrap(); + mem_table + .delete(TableKey("key2".into()), "value22".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), @@ -618,7 +640,9 @@ mod tests { // update mem_table.drain(); assert_eq!(mem_table.kv_size.size(), 0); - mem_table.insert("key3".into(), "value3".into()).unwrap(); + mem_table + .insert(TableKey("key3".into()), "value3".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -629,7 +653,7 @@ mod tests { // update-> insert mem_table - .update("key3".into(), "value3".into(), "value333".into()) + .update(TableKey("key3".into()), "value3".into(), "value333".into()) .unwrap(); assert_eq!( mem_table.kv_size.size(), @@ -641,7 +665,7 @@ mod tests { 
mem_table.drain(); mem_table - .update("key4".into(), "value4".into(), "value44".into()) + .update(TableKey("key4".into()), "value4".into(), "value44".into()) .unwrap(); assert_eq!( @@ -653,7 +677,11 @@ mod tests { + Bytes::from("value44").len() ); mem_table - .update("key4".into(), "value44".into(), "value4444".into()) + .update( + TableKey("key4".into()), + "value44".into(), + "value4444".into(), + ) .unwrap(); assert_eq!( @@ -671,7 +699,10 @@ mod tests { let mut mem_table = MemTable::new(false); assert_eq!(mem_table.kv_size.size(), 0); - mem_table.insert("key1".into(), "value1".into()).unwrap(); + mem_table + .insert(TableKey("key1".into()), "value1".into()) + .unwrap(); + assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -680,7 +711,9 @@ mod tests { + Bytes::from("value1").len() ); - mem_table.insert("key1".into(), "value111".into()).unwrap(); + mem_table + .insert(TableKey("key1".into()), "value111".into()) + .unwrap(); assert_eq!( mem_table.kv_size.size(), std::mem::size_of::() @@ -691,7 +724,7 @@ mod tests { mem_table.drain(); mem_table - .update("key4".into(), "value4".into(), "value44".into()) + .update(TableKey("key4".into()), "value4".into(), "value44".into()) .unwrap(); assert_eq!( @@ -703,7 +736,11 @@ mod tests { + Bytes::from("value44").len() ); mem_table - .update("key4".into(), "value44".into(), "value4444".into()) + .update( + TableKey("key4".into()), + "value44".into(), + "value4444".into(), + ) .unwrap(); assert_eq!( diff --git a/src/storage/src/memory.rs b/src/storage/src/memory.rs index 26b37c83cbf4a..ac52b65e5488f 100644 --- a/src/storage/src/memory.rs +++ b/src/storage/src/memory.rs @@ -20,7 +20,7 @@ use std::sync::{Arc, LazyLock}; use bytes::Bytes; use parking_lot::RwLock; use risingwave_common::catalog::TableId; -use risingwave_hummock_sdk::key::{FullKey, TableKey, UserKey}; +use risingwave_hummock_sdk::key::{FullKey, TableKey, TableKeyRange, UserKey}; use risingwave_hummock_sdk::{HummockEpoch, HummockReadEpoch}; use 
crate::error::StorageResult; @@ -501,7 +501,7 @@ impl MemoryStateStore { impl RangeKvStateStore { fn scan( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, table_id: TableId, limit: Option, @@ -538,7 +538,7 @@ impl StateStoreRead for RangeKvStateStore { #[allow(clippy::unused_async)] async fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> StorageResult> { @@ -556,7 +556,7 @@ impl StateStoreRead for RangeKvStateStore { #[allow(clippy::unused_async)] async fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> StorageResult { @@ -575,7 +575,7 @@ impl StateStoreWrite for RangeKvStateStore { #[allow(clippy::unused_async)] async fn ingest_batch( &self, - mut kv_pairs: Vec<(Bytes, StorageValue)>, + mut kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> StorageResult { @@ -594,7 +594,7 @@ impl StateStoreWrite for RangeKvStateStore { ), None, )? 
{ - delete_keys.insert(key.user_key.table_key.0); + delete_keys.insert(key.user_key.table_key); } } for key in delete_keys { @@ -606,7 +606,7 @@ impl StateStoreWrite for RangeKvStateStore { .ingest_batch(kv_pairs.into_iter().map(|(key, value)| { size += key.len() + value.size(); ( - FullKey::new(write_options.table_id, TableKey(key), epoch), + FullKey::new(write_options.table_id, key, epoch), value.user_value, ) }))?; @@ -729,8 +729,14 @@ mod tests { state_store .ingest_batch( vec![ - (b"a".to_vec().into(), StorageValue::new_put(b"v1".to_vec())), - (b"b".to_vec().into(), StorageValue::new_put(b"v1".to_vec())), + ( + TableKey(Bytes::from(b"a".to_vec())), + StorageValue::new_put(b"v1".to_vec()), + ), + ( + TableKey(Bytes::from(b"b".to_vec())), + StorageValue::new_put(b"v1".to_vec()), + ), ], vec![], WriteOptions { @@ -743,8 +749,14 @@ mod tests { state_store .ingest_batch( vec![ - (b"a".to_vec().into(), StorageValue::new_put(b"v2".to_vec())), - (b"b".to_vec().into(), StorageValue::new_delete()), + ( + TableKey(Bytes::from(b"a".to_vec())), + StorageValue::new_put(b"v2".to_vec()), + ), + ( + TableKey(Bytes::from(b"b".to_vec())), + StorageValue::new_delete(), + ), ], vec![], WriteOptions { @@ -758,8 +770,8 @@ mod tests { state_store .scan( ( - Bound::Included(Bytes::from("a")), - Bound::Included(Bytes::from("b")), + Bound::Included(TableKey(Bytes::from("a"))), + Bound::Included(TableKey(Bytes::from("b"))), ), 0, TableId::default(), @@ -785,8 +797,8 @@ mod tests { state_store .scan( ( - Bound::Included(Bytes::from("a")), - Bound::Included(Bytes::from("b")), + Bound::Included(TableKey(Bytes::from("a"))), + Bound::Included(TableKey(Bytes::from("b"))), ), 0, TableId::default(), @@ -804,8 +816,8 @@ mod tests { state_store .scan( ( - Bound::Included(Bytes::from("a")), - Bound::Included(Bytes::from("b")), + Bound::Included(TableKey(Bytes::from("a"))), + Bound::Included(TableKey(Bytes::from("b"))), ), 1, TableId::default(), @@ -821,42 +833,54 @@ mod tests { ); assert_eq!( 
state_store - .get(Bytes::from("a"), 0, ReadOptions::default(),) + .get(TableKey(Bytes::from("a")), 0, ReadOptions::default(),) .await .unwrap(), Some(Bytes::from("v1")) ); assert_eq!( state_store - .get(Bytes::copy_from_slice(b"b"), 0, ReadOptions::default(),) + .get( + TableKey(Bytes::copy_from_slice(b"b")), + 0, + ReadOptions::default(), + ) .await .unwrap(), Some(b"v1".to_vec().into()) ); assert_eq!( state_store - .get(Bytes::copy_from_slice(b"c"), 0, ReadOptions::default(),) + .get( + TableKey(Bytes::copy_from_slice(b"c")), + 0, + ReadOptions::default(), + ) .await .unwrap(), None ); assert_eq!( state_store - .get(Bytes::copy_from_slice(b"a"), 1, ReadOptions::default(),) + .get( + TableKey(Bytes::copy_from_slice(b"a")), + 1, + ReadOptions::default(), + ) .await .unwrap(), Some(b"v2".to_vec().into()) ); assert_eq!( state_store - .get(Bytes::from("b"), 1, ReadOptions::default(),) + .get(TableKey(Bytes::from("b")), 1, ReadOptions::default(),) .await .unwrap(), None ); assert_eq!( state_store - .get(Bytes::from("c"), 1, ReadOptions::default()) + .get(TableKey(Bytes::from("c")), 1, ReadOptions::default()) .await .unwrap(), None diff --git a/src/storage/src/monitor/compactor_metrics.rs b/src/storage/src/monitor/compactor_metrics.rs index 31a36116122a7..d71fd8ac87b7a 100644 --- a/src/storage/src/monitor/compactor_metrics.rs +++ b/src/storage/src/monitor/compactor_metrics.rs @@ -26,6 +26,7 @@ use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; #[derive(Debug, Clone)] pub struct CompactorMetrics { pub compaction_upload_sst_counts: GenericCounter, + pub compact_fast_runner_bytes: GenericCounter, pub compact_write_bytes: GenericCounterVec, pub compact_read_current_level: GenericCounterVec, pub compact_read_next_level: GenericCounterVec, @@ -211,7 +212,12 @@ impl CompactorMetrics { "Total size of compaction files size that have been written to object store from shared buffer", registry ).unwrap(); - + let compact_fast_runner_bytes = 
register_int_counter_with_registry!( + "compactor_fast_compact_bytes", + "Total size of compaction files size of fast compactor runner", + registry + ) + .unwrap(); let opts = histogram_opts!( "compactor_sstable_distinct_epoch_count", "Total number gotten from sstable_distinct_epoch_count, for observing sstable_distinct_epoch_count", @@ -239,6 +245,7 @@ impl CompactorMetrics { Self { compaction_upload_sst_counts, + compact_fast_runner_bytes, compact_write_bytes, compact_read_current_level, compact_read_next_level, diff --git a/src/storage/src/monitor/hummock_state_store_metrics.rs b/src/storage/src/monitor/hummock_state_store_metrics.rs index 77043b32ab455..1b4894256f11c 100644 --- a/src/storage/src/monitor/hummock_state_store_metrics.rs +++ b/src/storage/src/monitor/hummock_state_store_metrics.rs @@ -17,12 +17,18 @@ use std::sync::{Arc, OnceLock}; use prometheus::core::{AtomicU64, Collector, Desc, GenericCounter, GenericGauge}; use prometheus::{ exponential_buckets, histogram_opts, proto, register_histogram_vec_with_registry, - register_int_counter_vec_with_registry, register_int_gauge_with_registry, Gauge, IntGauge, - Opts, Registry, + register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, + register_int_gauge_with_registry, Gauge, IntGauge, IntGaugeVec, Opts, Registry, }; use risingwave_common::config::MetricLevel; -use risingwave_common::metrics::{RelabeledCounterVec, RelabeledHistogramVec}; +use risingwave_common::metrics::{ + RelabeledCounterVec, RelabeledGuardedHistogramVec, RelabeledGuardedIntCounterVec, + RelabeledHistogramVec, +}; use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; +use risingwave_common::{ + register_guarded_histogram_vec_with_registry, register_guarded_int_counter_vec_with_registry, +}; use tracing::warn; /// [`HummockStateStoreMetrics`] stores the performance and IO metrics of `XXXStore` such as @@ -35,17 +41,17 @@ pub struct HummockStateStoreMetrics { pub bloom_filter_true_negative_counts: 
RelabeledCounterVec, pub bloom_filter_check_counts: RelabeledCounterVec, pub iter_merge_sstable_counts: RelabeledHistogramVec, - pub sst_store_block_request_counts: RelabeledCounterVec, - pub iter_scan_key_counts: RelabeledCounterVec, + pub sst_store_block_request_counts: RelabeledGuardedIntCounterVec<2>, + pub iter_scan_key_counts: RelabeledGuardedIntCounterVec<2>, pub get_shared_buffer_hit_counts: RelabeledCounterVec, pub remote_read_time: RelabeledHistogramVec, - pub iter_fetch_meta_duration: RelabeledHistogramVec, + pub iter_fetch_meta_duration: RelabeledGuardedHistogramVec<1>, pub iter_fetch_meta_cache_unhits: IntGauge, pub iter_slow_fetch_meta_cache_unhits: IntGauge, - pub read_req_bloom_filter_positive_counts: RelabeledCounterVec, - pub read_req_positive_but_non_exist_counts: RelabeledCounterVec, - pub read_req_check_bloom_filter_counts: RelabeledCounterVec, + pub read_req_bloom_filter_positive_counts: RelabeledGuardedIntCounterVec<2>, + pub read_req_positive_but_non_exist_counts: RelabeledGuardedIntCounterVec<2>, + pub read_req_check_bloom_filter_counts: RelabeledGuardedIntCounterVec<2>, pub write_batch_tuple_counts: RelabeledCounterVec, pub write_batch_duration: RelabeledHistogramVec, @@ -67,6 +73,10 @@ pub struct HummockStateStoreMetrics { // uploading task pub uploader_uploading_task_size: GenericGauge, + + // memory + pub mem_table_memory_size: IntGaugeVec, + pub mem_table_item_count: IntGaugeVec, } pub static GLOBAL_HUMMOCK_STATE_STORE_METRICS: OnceLock = OnceLock::new(); @@ -81,6 +91,10 @@ impl HummockStateStoreMetrics { pub fn new(registry: &Registry, metric_level: MetricLevel) -> Self { // 10ms ~ max 2.7h let time_buckets = exponential_buckets(0.01, 10.0, 7).unwrap(); + + // 1ms - 100s + let state_store_read_time_buckets = exponential_buckets(0.001, 10.0, 5).unwrap(); + let bloom_filter_true_negative_counts = register_int_counter_vec_with_registry!( "state_store_bloom_filter_true_negative_counts", "Total number of sstables that have been considered 
true negative by bloom filters", @@ -122,27 +136,27 @@ impl HummockStateStoreMetrics { ); // ----- sst store ----- - let sst_store_block_request_counts = register_int_counter_vec_with_registry!( + let sst_store_block_request_counts = register_guarded_int_counter_vec_with_registry!( "state_store_sst_store_block_request_counts", "Total number of sst block requests that have been issued to sst store", &["table_id", "type"], registry ) .unwrap(); - let sst_store_block_request_counts = RelabeledCounterVec::with_metric_level( + let sst_store_block_request_counts = RelabeledGuardedIntCounterVec::with_metric_level( MetricLevel::Critical, sst_store_block_request_counts, metric_level, ); - let iter_scan_key_counts = register_int_counter_vec_with_registry!( + let iter_scan_key_counts = register_guarded_int_counter_vec_with_registry!( "state_store_iter_scan_key_counts", "Total number of keys read by iterator", &["table_id", "type"], registry ) .unwrap(); - let iter_scan_key_counts = RelabeledCounterVec::with_metric_level( + let iter_scan_key_counts = RelabeledGuardedIntCounterVec::with_metric_level( MetricLevel::Info, iter_scan_key_counts, metric_level, @@ -177,11 +191,11 @@ impl HummockStateStoreMetrics { let opts = histogram_opts!( "state_store_iter_fetch_meta_duration", "Histogram of iterator fetch SST meta time that have been issued to state store", - time_buckets.clone(), + state_store_read_time_buckets.clone(), ); let iter_fetch_meta_duration = - register_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); - let iter_fetch_meta_duration = RelabeledHistogramVec::with_metric_level( + register_guarded_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); + let iter_fetch_meta_duration = RelabeledGuardedHistogramVec::with_metric_level( MetricLevel::Info, iter_fetch_meta_duration, metric_level, @@ -302,45 +316,64 @@ impl HummockStateStoreMetrics { .register(Box::new(uploader_uploading_task_size.clone())) .unwrap(); - let 
read_req_bloom_filter_positive_counts = register_int_counter_vec_with_registry!( + let read_req_bloom_filter_positive_counts = register_guarded_int_counter_vec_with_registry!( "state_store_read_req_bloom_filter_positive_counts", "Total number of read request with at least one SST bloom filter check returns positive", &["table_id", "type"], registry ) .unwrap(); - let read_req_bloom_filter_positive_counts = RelabeledCounterVec::with_metric_level( - MetricLevel::Info, - read_req_bloom_filter_positive_counts, - metric_level, - ); + let read_req_bloom_filter_positive_counts = + RelabeledGuardedIntCounterVec::with_metric_level( + MetricLevel::Info, + read_req_bloom_filter_positive_counts, + metric_level, + ); - let read_req_positive_but_non_exist_counts = register_int_counter_vec_with_registry!( + let read_req_positive_but_non_exist_counts = register_guarded_int_counter_vec_with_registry!( "state_store_read_req_positive_but_non_exist_counts", "Total number of read request on non-existent key/prefix with at least one SST bloom filter check returns positive", &["table_id", "type"], registry ) .unwrap(); - let read_req_positive_but_non_exist_counts = RelabeledCounterVec::with_metric_level( - MetricLevel::Info, - read_req_positive_but_non_exist_counts, - metric_level, - ); + let read_req_positive_but_non_exist_counts = + RelabeledGuardedIntCounterVec::with_metric_level( + MetricLevel::Info, + read_req_positive_but_non_exist_counts, + metric_level, + ); - let read_req_check_bloom_filter_counts = register_int_counter_vec_with_registry!( + let read_req_check_bloom_filter_counts = register_guarded_int_counter_vec_with_registry!( "state_store_read_req_check_bloom_filter_counts", "Total number of read request that checks bloom filter with a prefix hint", &["table_id", "type"], registry ) .unwrap(); - let read_req_check_bloom_filter_counts = RelabeledCounterVec::with_metric_level( + + let read_req_check_bloom_filter_counts = RelabeledGuardedIntCounterVec::with_metric_level( 
MetricLevel::Info, read_req_check_bloom_filter_counts, metric_level, ); + let mem_table_memory_size = register_int_gauge_vec_with_registry!( + "state_store_mem_table_memory_size", + "Memory usage of mem_table", + &["table_id", "instance_id"], + registry + ) + .unwrap(); + + let mem_table_item_count = register_int_gauge_vec_with_registry!( + "state_store_mem_table_item_count", + "Item counts in mem_table", + &["table_id", "instance_id"], + registry + ) + .unwrap(); + Self { bloom_filter_true_negative_counts, bloom_filter_check_counts, @@ -365,6 +398,8 @@ impl HummockStateStoreMetrics { spill_task_size_from_sealed: spill_task_size.with_label_values(&["sealed"]), spill_task_size_from_unsealed: spill_task_size.with_label_values(&["unsealed"]), uploader_uploading_task_size, + mem_table_memory_size, + mem_table_item_count, } } diff --git a/src/storage/src/monitor/monitored_storage_metrics.rs b/src/storage/src/monitor/monitored_storage_metrics.rs index a1517d98918ac..1a33a8bcb6ac1 100644 --- a/src/storage/src/monitor/monitored_storage_metrics.rs +++ b/src/storage/src/monitor/monitored_storage_metrics.rs @@ -19,20 +19,23 @@ use prometheus::{ register_histogram_with_registry, register_int_counter_vec_with_registry, Histogram, Registry, }; use risingwave_common::config::MetricLevel; -use risingwave_common::metrics::{RelabeledCounterVec, RelabeledHistogramVec}; +use risingwave_common::metrics::{ + RelabeledCounterVec, RelabeledGuardedHistogramVec, RelabeledHistogramVec, +}; use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; +use risingwave_common::register_guarded_histogram_vec_with_registry; /// [`MonitoredStorageMetrics`] stores the performance and IO metrics of Storage. 
#[derive(Debug, Clone)] pub struct MonitoredStorageMetrics { - pub get_duration: RelabeledHistogramVec, + pub get_duration: RelabeledGuardedHistogramVec<1>, pub get_key_size: RelabeledHistogramVec, pub get_value_size: RelabeledHistogramVec, pub iter_size: RelabeledHistogramVec, pub iter_item: RelabeledHistogramVec, - pub iter_init_duration: RelabeledHistogramVec, - pub iter_scan_duration: RelabeledHistogramVec, + pub iter_init_duration: RelabeledGuardedHistogramVec<1>, + pub iter_scan_duration: RelabeledGuardedHistogramVec<1>, pub may_exist_duration: RelabeledHistogramVec, pub iter_in_process_counts: RelabeledCounterVec, @@ -88,15 +91,22 @@ impl MonitoredStorageMetrics { buckets.extend(exponential_buckets(0.001, 2.0, 5).unwrap()); // 1 ~ 16ms. buckets.extend(exponential_buckets(0.05, 4.0, 5).unwrap()); // 0.05 ~ 1.28s. buckets.push(16.0); // 16s + + // 1ms - 100s + let state_store_read_time_buckets = exponential_buckets(0.001, 10.0, 5).unwrap(); + let get_duration_opts = histogram_opts!( "state_store_get_duration", "Total latency of get that have been issued to state store", - buckets.clone(), + state_store_read_time_buckets.clone(), ); - let get_duration = - register_histogram_vec_with_registry!(get_duration_opts, &["table_id"], registry) - .unwrap(); - let get_duration = RelabeledHistogramVec::with_metric_level( + let get_duration = register_guarded_histogram_vec_with_registry!( + get_duration_opts, + &["table_id"], + registry + ) + .unwrap(); + let get_duration = RelabeledGuardedHistogramVec::with_metric_level( MetricLevel::Critical, get_duration, metric_level, @@ -125,11 +135,11 @@ impl MonitoredStorageMetrics { let opts = histogram_opts!( "state_store_iter_init_duration", "Histogram of the time spent on iterator initialization.", - buckets.clone(), + state_store_read_time_buckets.clone(), ); let iter_init_duration = - register_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); - let iter_init_duration = 
RelabeledHistogramVec::with_metric_level( + register_guarded_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); + let iter_init_duration = RelabeledGuardedHistogramVec::with_metric_level( MetricLevel::Critical, iter_init_duration, metric_level, @@ -138,11 +148,11 @@ impl MonitoredStorageMetrics { let opts = histogram_opts!( "state_store_iter_scan_duration", "Histogram of the time spent on iterator scanning.", - buckets.clone(), + state_store_read_time_buckets.clone(), ); let iter_scan_duration = - register_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); - let iter_scan_duration = RelabeledHistogramVec::with_metric_level( + register_guarded_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); + let iter_scan_duration = RelabeledGuardedHistogramVec::with_metric_level( MetricLevel::Critical, iter_scan_duration, metric_level, diff --git a/src/storage/src/monitor/monitored_store.rs b/src/storage/src/monitor/monitored_store.rs index 77924a999709d..fd3e235201eb4 100644 --- a/src/storage/src/monitor/monitored_store.rs +++ b/src/storage/src/monitor/monitored_store.rs @@ -20,6 +20,7 @@ use bytes::Bytes; use futures::{Future, TryFutureExt, TryStreamExt}; use futures_async_stream::try_stream; use risingwave_common::catalog::TableId; +use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockReadEpoch; use tokio::time::Instant; use tracing::error; @@ -175,7 +176,7 @@ impl StateStoreRead for MonitoredStateStore { fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> impl Future>> + '_ { @@ -186,7 +187,7 @@ impl StateStoreRead for MonitoredStateStore { fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + '_ { @@ -203,7 +204,7 @@ impl LocalStateStore for MonitoredStateStore { async fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: 
ReadOptions, ) -> StorageResult { let table_id_label = read_options.table_id.to_string(); @@ -223,7 +224,7 @@ impl LocalStateStore for MonitoredStateStore { fn get( &self, - key: Bytes, + key: TableKey, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { let table_id = read_options.table_id; @@ -234,7 +235,7 @@ impl LocalStateStore for MonitoredStateStore { fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { let table_id = read_options.table_id; @@ -243,12 +244,17 @@ impl LocalStateStore for MonitoredStateStore { .map_ok(identity) } - fn insert(&mut self, key: Bytes, new_val: Bytes, old_val: Option) -> StorageResult<()> { + fn insert( + &mut self, + key: TableKey, + new_val: Bytes, + old_val: Option, + ) -> StorageResult<()> { // TODO: collect metrics self.inner.insert(key, new_val, old_val) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { // TODO: collect metrics self.inner.delete(key, old_val) } diff --git a/src/storage/src/monitor/traced_store.rs b/src/storage/src/monitor/traced_store.rs index 505c0460552a7..e92b5974844e2 100644 --- a/src/storage/src/monitor/traced_store.rs +++ b/src/storage/src/monitor/traced_store.rs @@ -17,6 +17,7 @@ use std::ops::Bound; use bytes::Bytes; use futures::{Future, TryFutureExt, TryStreamExt}; use futures_async_stream::try_stream; +use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_hummock_trace::{ init_collector, should_use_trace, ConcurrentId, MayTraceSpan, OperationResult, StorageType, @@ -82,13 +83,13 @@ impl TracedStateStore { async fn traced_get( &self, - key: Bytes, + key: TableKey, epoch: Option, read_options: ReadOptions, get_future: impl Future>>, ) -> StorageResult> { let span = TraceSpan::new_get_span( - key.clone(), + key.0.clone(), epoch, 
read_options.clone().into(), self.storage_type, @@ -111,7 +112,7 @@ impl LocalStateStore for TracedStateStore { fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future> + Send + '_ { self.inner.may_exist(key_range, read_options) @@ -119,7 +120,7 @@ impl LocalStateStore for TracedStateStore { fn get( &self, - key: Bytes, + key: TableKey, read_options: ReadOptions, ) -> impl Future>> + '_ { self.traced_get( @@ -132,11 +133,13 @@ impl LocalStateStore for TracedStateStore { fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { + let (l, r) = key_range.clone(); + let bytes_key_range = (l.map(|l| l.0), r.map(|r| r.0)); let span = TraceSpan::new_iter_span( - key_range.clone(), + bytes_key_range, None, read_options.clone().into(), self.storage_type, @@ -145,9 +148,14 @@ impl LocalStateStore for TracedStateStore { .map_ok(identity) } - fn insert(&mut self, key: Bytes, new_val: Bytes, old_val: Option) -> StorageResult<()> { + fn insert( + &mut self, + key: TableKey, + new_val: Bytes, + old_val: Option, + ) -> StorageResult<()> { let span = TraceSpan::new_insert_span( - key.clone(), + key.0.clone(), new_val.clone(), old_val.clone(), self.storage_type, @@ -158,8 +166,8 @@ impl LocalStateStore for TracedStateStore { res } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { - let span = TraceSpan::new_delete_span(key.clone(), old_val.clone(), self.storage_type); + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { + let span = TraceSpan::new_delete_span(key.0.clone(), old_val.clone(), self.storage_type); let res = self.inner.delete(key, old_val); @@ -263,7 +271,7 @@ impl StateStoreRead for TracedStateStore { fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { @@ -277,12 +285,14 @@ impl StateStoreRead for TracedStateStore { fn 
iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + '_ { + let (l, r) = key_range.clone(); + let bytes_key_range = (l.map(|l| l.0), r.map(|r| r.0)); let span = TraceSpan::new_iter_span( - key_range.clone(), + bytes_key_range, Some(epoch), read_options.clone().into(), self.storage_type, diff --git a/src/storage/src/opts.rs b/src/storage/src/opts.rs index 22dac37c3a3f0..a3243cbc8c465 100644 --- a/src/storage/src/opts.rs +++ b/src/storage/src/opts.rs @@ -75,13 +75,19 @@ pub struct StorageOpts { pub data_file_cache_recover_concurrency: usize, pub data_file_cache_lfu_window_to_cache_size_ratio: usize, pub data_file_cache_lfu_tiny_lru_capacity_ratio: f64, - pub data_file_cache_rated_random_rate_mb: usize, + pub data_file_cache_insert_rate_limit_mb: usize, pub data_file_cache_flush_rate_limit_mb: usize, pub data_file_cache_reclaim_rate_limit_mb: usize, + pub data_file_cache_allocation_bits: usize, + pub data_file_cache_allocation_timeout_ms: usize, pub cache_refill_data_refill_levels: Vec, pub cache_refill_timeout_ms: u64, pub cache_refill_concurrency: usize, + pub cache_refill_recent_filter_layers: usize, + pub cache_refill_recent_filter_rotate_interval_ms: usize, + pub cache_refill_unit: usize, + pub cache_refill_threshold: f64, pub meta_file_cache_dir: String, pub meta_file_cache_capacity_mb: usize, @@ -94,9 +100,11 @@ pub struct StorageOpts { pub meta_file_cache_recover_concurrency: usize, pub meta_file_cache_lfu_window_to_cache_size_ratio: usize, pub meta_file_cache_lfu_tiny_lru_capacity_ratio: f64, - pub meta_file_cache_rated_random_rate_mb: usize, + pub meta_file_cache_insert_rate_limit_mb: usize, pub meta_file_cache_flush_rate_limit_mb: usize, pub meta_file_cache_reclaim_rate_limit_mb: usize, + pub meta_file_cache_allocation_bits: usize, + pub meta_file_cache_allocation_timeout_ms: usize, /// The storage url for storing backups. 
pub backup_storage_url: String, @@ -117,6 +125,8 @@ pub struct StorageOpts { pub compactor_max_sst_key_count: u64, pub compactor_max_task_multiplier: f32, pub compactor_max_sst_size: u64, + /// enable FastCompactorRunner. + pub enable_fast_compaction: bool, } impl Default for StorageOpts { @@ -171,9 +181,11 @@ impl From<(&RwConfig, &SystemParamsReader, &StorageMemoryConfig)> for StorageOpt .storage .data_file_cache .lfu_tiny_lru_capacity_ratio, - data_file_cache_rated_random_rate_mb: c.storage.data_file_cache.rated_random_rate_mb, + data_file_cache_insert_rate_limit_mb: c.storage.data_file_cache.insert_rate_limit_mb, data_file_cache_flush_rate_limit_mb: c.storage.data_file_cache.flush_rate_limit_mb, data_file_cache_reclaim_rate_limit_mb: c.storage.data_file_cache.reclaim_rate_limit_mb, + data_file_cache_allocation_bits: c.storage.data_file_cache.allocation_bits, + data_file_cache_allocation_timeout_ms: c.storage.data_file_cache.allocation_timeout_ms, meta_file_cache_dir: c.storage.meta_file_cache.dir.clone(), meta_file_cache_capacity_mb: c.storage.meta_file_cache.capacity_mb, meta_file_cache_file_capacity_mb: c.storage.meta_file_cache.file_capacity_mb, @@ -191,12 +203,21 @@ impl From<(&RwConfig, &SystemParamsReader, &StorageMemoryConfig)> for StorageOpt .storage .meta_file_cache .lfu_tiny_lru_capacity_ratio, - meta_file_cache_rated_random_rate_mb: c.storage.meta_file_cache.rated_random_rate_mb, + meta_file_cache_insert_rate_limit_mb: c.storage.meta_file_cache.insert_rate_limit_mb, meta_file_cache_flush_rate_limit_mb: c.storage.meta_file_cache.flush_rate_limit_mb, meta_file_cache_reclaim_rate_limit_mb: c.storage.meta_file_cache.reclaim_rate_limit_mb, + meta_file_cache_allocation_bits: c.storage.meta_file_cache.allocation_bits, + meta_file_cache_allocation_timeout_ms: c.storage.meta_file_cache.allocation_timeout_ms, cache_refill_data_refill_levels: c.storage.cache_refill.data_refill_levels.clone(), cache_refill_timeout_ms: c.storage.cache_refill.timeout_ms, 
cache_refill_concurrency: c.storage.cache_refill.concurrency, + cache_refill_recent_filter_layers: c.storage.cache_refill.recent_filter_layers, + cache_refill_recent_filter_rotate_interval_ms: c + .storage + .cache_refill + .recent_filter_rotate_interval_ms, + cache_refill_unit: c.storage.cache_refill.unit, + cache_refill_threshold: c.storage.cache_refill.threshold, max_preload_wait_time_mill: c.storage.max_preload_wait_time_mill, object_store_streaming_read_timeout_ms: c .storage @@ -213,6 +234,7 @@ impl From<(&RwConfig, &SystemParamsReader, &StorageMemoryConfig)> for StorageOpt compactor_max_sst_key_count: c.storage.compactor_max_sst_key_count, compactor_max_task_multiplier: c.storage.compactor_max_task_multiplier, compactor_max_sst_size: c.storage.compactor_max_sst_size, + enable_fast_compaction: c.storage.enable_fast_compaction, } } } diff --git a/src/storage/src/panic_store.rs b/src/storage/src/panic_store.rs index 53162e29637fd..de4e9f38032d7 100644 --- a/src/storage/src/panic_store.rs +++ b/src/storage/src/panic_store.rs @@ -18,6 +18,7 @@ use std::task::{Context, Poll}; use bytes::Bytes; use futures::Stream; +use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockReadEpoch; use crate::error::StorageResult; @@ -35,7 +36,7 @@ impl StateStoreRead for PanicStateStore { #[allow(clippy::unused_async)] async fn get( &self, - _key: Bytes, + _key: TableKey, _epoch: u64, _read_options: ReadOptions, ) -> StorageResult> { @@ -45,7 +46,7 @@ impl StateStoreRead for PanicStateStore { #[allow(clippy::unused_async)] async fn iter( &self, - _key_range: IterKeyRange, + _key_range: TableKeyRange, _epoch: u64, _read_options: ReadOptions, ) -> StorageResult { @@ -57,11 +58,11 @@ impl StateStoreWrite for PanicStateStore { #[allow(clippy::unused_async)] async fn ingest_batch( &self, - _kv_pairs: Vec<(Bytes, StorageValue)>, + _kv_pairs: Vec<(TableKey, StorageValue)>, _delete_ranges: Vec<(Bound, Bound)>, _write_options: WriteOptions, ) -> 
StorageResult { - panic!("should not read from the state store!"); + panic!("should not write to the state store!"); } } @@ -71,21 +72,25 @@ impl LocalStateStore for PanicStateStore { #[allow(clippy::unused_async)] async fn may_exist( &self, - _key_range: IterKeyRange, + _key_range: TableKeyRange, _read_options: ReadOptions, ) -> StorageResult { panic!("should not call may_exist from the state store!"); } #[allow(clippy::unused_async)] - async fn get(&self, _key: Bytes, _read_options: ReadOptions) -> StorageResult> { + async fn get( + &self, + _key: TableKey, + _read_options: ReadOptions, + ) -> StorageResult> { panic!("should not operate on the panic state store!"); } #[allow(clippy::unused_async)] async fn iter( &self, - _key_range: IterKeyRange, + _key_range: TableKeyRange, _read_options: ReadOptions, ) -> StorageResult> { panic!("should not operate on the panic state store!"); @@ -93,14 +98,14 @@ impl LocalStateStore for PanicStateStore { fn insert( &mut self, - _key: Bytes, + _key: TableKey, _new_val: Bytes, _old_val: Option, ) -> StorageResult<()> { panic!("should not operate on the panic state store!"); } - fn delete(&mut self, _key: Bytes, _old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, _key: TableKey, _old_val: Bytes) -> StorageResult<()> { panic!("should not operate on the panic state store!"); } diff --git a/src/storage/src/row_serde/row_serde_util.rs b/src/storage/src/row_serde/row_serde_util.rs index d426789829797..b8ef0dba29fec 100644 --- a/src/storage/src/row_serde/row_serde_util.rs +++ b/src/storage/src/row_serde/row_serde_util.rs @@ -16,6 +16,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use risingwave_common::hash::VirtualNode; use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::util::row_serde::OrderedRowSerde; +use risingwave_hummock_sdk::key::TableKey; pub fn serialize_pk(pk: impl Row, serializer: &OrderedRowSerde) -> Bytes { let mut buf = BytesMut::with_capacity(pk.len()); @@ -27,11 +28,11 @@ pub fn 
serialize_pk_with_vnode( pk: impl Row, serializer: &OrderedRowSerde, vnode: VirtualNode, -) -> Bytes { +) -> TableKey { let mut buffer = BytesMut::new(); buffer.put_slice(&vnode.to_be_bytes()[..]); pk.memcmp_serialize_into(serializer, &mut buffer); - buffer.freeze() + TableKey(buffer.freeze()) } pub fn deserialize_pk_with_vnode( diff --git a/src/storage/src/row_serde/value_serde.rs b/src/storage/src/row_serde/value_serde.rs index 43156500f3f7c..9048b90c23a53 100644 --- a/src/storage/src/row_serde/value_serde.rs +++ b/src/storage/src/row_serde/value_serde.rs @@ -27,8 +27,8 @@ use risingwave_common::util::value_encoding::column_aware_row_encoding::{ }; use risingwave_common::util::value_encoding::error::ValueEncodingError; use risingwave_common::util::value_encoding::{ - BasicSerde, BasicSerializer, EitherSerde, ValueRowDeserializer, ValueRowSerdeKind, - ValueRowSerializer, + BasicSerde, BasicSerializer, DatumFromProtoExt, EitherSerde, ValueRowDeserializer, + ValueRowSerdeKind, ValueRowSerializer, }; use risingwave_expr::expr::build_from_prost; use risingwave_pb::plan_common::column_desc::GeneratedOrDefaultColumn; @@ -98,21 +98,28 @@ impl ValueRowSerdeNew for ColumnAwareSerde { } let column_with_default = table_columns.iter().enumerate().filter_map(|(i, c)| { - if c.is_default() { - if let GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { expr }) = - c.generated_or_default_column.clone().unwrap() - { - Some(( - i, - build_from_prost(&expr.expect("expr should not be none")) - .expect("build_from_prost error") - .eval_row_infallible(&OwnedRow::empty(), |_err| {}) - .now_or_never() - .expect("constant expression should not be async"), - )) + if let Some(GeneratedOrDefaultColumn::DefaultColumn(DefaultColumnDesc { + snapshot_value, + expr, + })) = c.generated_or_default_column.clone() + { + // TODO: may not panic on error + let value = if let Some(snapshot_value) = snapshot_value { + // If there's a `snapshot_value`, we can use it directly. 
+ Datum::from_protobuf(&snapshot_value, &c.data_type) + .expect("invalid default value") } else { - unreachable!() - } + // For backward compatibility, default columns in old tables may not have `snapshot_value`. + // In this case, we need to evaluate the expression to get the default value. + // It's okay since we previously banned impure expressions in default columns. + build_from_prost(&expr.expect("expr should not be none")) + .expect("build_from_prost error") + .eval_row(&OwnedRow::empty()) + .now_or_never() + .expect("constant expression should not be async") + .expect("eval_row failed") + }; + Some((i, value)) } else { None } diff --git a/src/storage/src/store.rs b/src/storage/src/store.rs index 4150e31d616e8..2cef99ab4e651 100644 --- a/src/storage/src/store.rs +++ b/src/storage/src/store.rs @@ -22,7 +22,7 @@ use futures::{Stream, StreamExt, TryStreamExt}; use futures_async_stream::try_stream; use risingwave_common::catalog::{TableId, TableOption}; use risingwave_common::util::epoch::{Epoch, EpochPair}; -use risingwave_hummock_sdk::key::{FullKey, KeyPayloadType}; +use risingwave_hummock_sdk::key::{FullKey, TableKey, TableKeyRange}; use risingwave_hummock_sdk::{HummockReadEpoch, LocalSstableInfo}; use risingwave_hummock_trace::{ TracedInitOptions, TracedNewLocalOptions, TracedPrefetchOptions, TracedReadOptions, @@ -33,7 +33,6 @@ use crate::error::{StorageError, StorageResult}; use crate::hummock::CachePolicy; use crate::monitor::{MonitoredStateStore, MonitoredStorageMetrics}; use crate::storage_value::StorageValue; -use crate::write_batch::WriteBatch; pub trait StaticSendSync = Send + Sync + 'static; @@ -70,8 +69,6 @@ pub type StateStoreIterItem = (FullKey, Bytes); pub trait StateStoreIterItemStream = Stream> + Send; pub trait StateStoreReadIterStream = StateStoreIterItemStream + 'static; -pub type IterKeyRange = (Bound, Bound); - pub trait StateStoreRead: StaticSendSync { type IterStream: StateStoreReadIterStream; @@ -79,7 +76,7 @@ pub trait StateStoreRead: 
StaticSendSync { /// The result is based on a snapshot corresponding to the given `epoch`. fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> impl Future>> + Send + '_; @@ -91,7 +88,7 @@ pub trait StateStoreRead: StaticSendSync { /// corresponding to the given `epoch`. fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + Send + '_; @@ -107,7 +104,7 @@ pub trait StateStoreReadExt: StaticSendSync { /// By default, this simply calls `StateStore::iter` to fetch elements. fn scan( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, limit: Option, read_options: ReadOptions, @@ -117,7 +114,7 @@ pub trait StateStoreReadExt: StaticSendSync { impl StateStoreReadExt for S { async fn scan( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, limit: Option, mut read_options: ReadOptions, @@ -151,18 +148,10 @@ pub trait StateStoreWrite: StaticSendSync { /// per-key modification history (e.g. in compaction), not across different keys. fn ingest_batch( &self, - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> impl Future> + Send + '_; - - /// Creates a `WriteBatch` associated with this state store. - fn start_write_batch(&self, write_options: WriteOptions) -> WriteBatch<'_, Self> - where - Self: Sized, - { - WriteBatch::new(self, write_options) - } } #[derive(Default, Debug)] @@ -213,7 +202,7 @@ pub trait LocalStateStore: StaticSendSync { /// The result is based on the latest written snapshot. fn get( &self, - key: Bytes, + key: TableKey, read_options: ReadOptions, ) -> impl Future>> + Send + '_; @@ -224,16 +213,21 @@ pub trait LocalStateStore: StaticSendSync { /// snapshot. 
fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_; /// Inserts a key-value entry associated with a given `epoch` into the state store. - fn insert(&mut self, key: Bytes, new_val: Bytes, old_val: Option) -> StorageResult<()>; + fn insert( + &mut self, + key: TableKey, + new_val: Bytes, + old_val: Option, + ) -> StorageResult<()>; /// Deletes a key-value entry from the state store. Only the key-value entry with epoch smaller /// than the given `epoch` will be deleted. - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()>; + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()>; fn flush( &mut self, @@ -268,7 +262,7 @@ pub trait LocalStateStore: StaticSendSync { /// - true: `key_range` may or may not exist in storage. fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future> + Send + '_; } diff --git a/src/storage/src/store_impl.rs b/src/storage/src/store_impl.rs index 54d79c11d741f..8460b75ebc0dc 100644 --- a/src/storage/src/store_impl.rs +++ b/src/storage/src/store_impl.rs @@ -15,6 +15,7 @@ use std::fmt::Debug; use std::path::PathBuf; use std::sync::Arc; +use std::time::Duration; use enum_as_inner::EnumAsInner; use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; @@ -22,15 +23,12 @@ use risingwave_common_service::observer_manager::RpcNotificationClient; use risingwave_object_store::object::parse_remote_object_store; use crate::error::StorageResult; -use crate::filter_key_extractor::{ - FilterKeyExtractorManager, RemoteTableAccessor, RpcFilterKeyExtractorManager, -}; -use crate::hummock::backup_reader::BackupReaderRef; +use crate::filter_key_extractor::{RemoteTableAccessor, RpcFilterKeyExtractorManager}; +use crate::hummock::file_cache::preclude::*; use crate::hummock::hummock_meta_client::MonitoredHummockMetaClient; -use crate::hummock::sstable_store::SstableStoreRef; use 
crate::hummock::{ - set_foyer_metrics_registry, FileCache, FoyerRuntimeConfig, FoyerStoreConfig, HummockError, - HummockStorage, MemoryLimiter, SstableObjectIdManagerRef, SstableStore, + set_foyer_metrics_registry, FileCache, FileCacheConfig, HummockError, HummockStorage, + RecentFilter, SstableStore, }; use crate::memory::sled::SledStateStore; use crate::memory::MemoryStateStore; @@ -41,9 +39,9 @@ use crate::monitor::{ use crate::opts::StorageOpts; use crate::StateStore; -pub type HummockStorageType = impl StateStore + AsHummockTrait; -pub type MemoryStateStoreType = impl StateStore + AsHummockTrait; -pub type SledStateStoreType = impl StateStore + AsHummockTrait; +pub type HummockStorageType = impl StateStore + AsHummock; +pub type MemoryStateStoreType = impl StateStore + AsHummock; +pub type SledStateStoreType = impl StateStore + AsHummock; /// The type erased [`StateStore`]. #[derive(Clone, EnumAsInner)] @@ -66,9 +64,7 @@ pub enum StateStoreImpl { SledStateStore(Monitored), } -fn may_dynamic_dispatch( - state_store: impl StateStore + AsHummockTrait, -) -> impl StateStore + AsHummockTrait { +fn may_dynamic_dispatch(state_store: impl StateStore + AsHummock) -> impl StateStore + AsHummock { #[cfg(not(debug_assertions))] { state_store @@ -80,7 +76,7 @@ fn may_dynamic_dispatch( } } -fn may_verify(state_store: impl StateStore + AsHummockTrait) -> impl StateStore + AsHummockTrait { +fn may_verify(state_store: impl StateStore + AsHummock) -> impl StateStore + AsHummock { #[cfg(not(debug_assertions))] { state_store @@ -143,30 +139,11 @@ impl StateStoreImpl { ) } - pub fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { - { - match self { - StateStoreImpl::HummockStateStore(hummock) => Some( - hummock - .inner() - .as_hummock_trait() - .expect("should be hummock"), - ), - _ => None, - } - } - } - pub fn as_hummock(&self) -> Option<&HummockStorage> { match self { - StateStoreImpl::HummockStateStore(hummock) => Some( - hummock - .inner() - .as_hummock_trait() - 
.expect("should be hummock") - .as_hummock() - .expect("should be hummock"), - ), + StateStoreImpl::HummockStateStore(hummock) => { + Some(hummock.inner().as_hummock().expect("should be hummock")) + } _ => None, } } @@ -230,13 +207,15 @@ pub mod verify { use bytes::Bytes; use futures::{pin_mut, TryStreamExt}; use futures_async_stream::try_stream; + use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockReadEpoch; use tracing::log::warn; use crate::error::{StorageError, StorageResult}; + use crate::hummock::HummockStorage; use crate::storage_value::StorageValue; use crate::store::*; - use crate::store_impl::{AsHummockTrait, HummockTrait}; + use crate::store_impl::AsHummock; use crate::StateStore; fn assert_result_eq( @@ -263,9 +242,9 @@ pub mod verify { pub expected: Option, } - impl AsHummockTrait for VerifyStateStore { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { - self.actual.as_hummock_trait() + impl AsHummock for VerifyStateStore { + fn as_hummock(&self) -> Option<&HummockStorage> { + self.actual.as_hummock() } } @@ -274,7 +253,7 @@ pub mod verify { async fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> StorageResult> { @@ -294,7 +273,7 @@ pub mod verify { #[allow(clippy::manual_async_fn)] fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + '_ { @@ -340,7 +319,7 @@ pub mod verify { impl StateStoreWrite for VerifyStateStore { async fn ingest_batch( &self, - kv_pairs: Vec<(Bytes, StorageValue)>, + kv_pairs: Vec<(TableKey, StorageValue)>, delete_ranges: Vec<(Bound, Bound)>, write_options: WriteOptions, ) -> StorageResult { @@ -379,13 +358,17 @@ pub mod verify { // be consistent across different state store backends. 
fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future> + Send + '_ { self.actual.may_exist(key_range, read_options) } - async fn get(&self, key: Bytes, read_options: ReadOptions) -> StorageResult> { + async fn get( + &self, + key: TableKey, + read_options: ReadOptions, + ) -> StorageResult> { let actual = self.actual.get(key.clone(), read_options.clone()).await; if let Some(expected) = &self.expected { let expected = expected.get(key, read_options).await; @@ -397,7 +380,7 @@ pub mod verify { #[allow(clippy::manual_async_fn)] fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { async move { @@ -417,7 +400,7 @@ pub mod verify { fn insert( &mut self, - key: Bytes, + key: TableKey, new_val: Bytes, old_val: Option, ) -> StorageResult<()> { @@ -429,7 +412,7 @@ pub mod verify { Ok(()) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { if let Some(expected) = &mut self.expected { expected.delete(key.clone(), old_val.clone())?; } @@ -544,12 +527,12 @@ impl StateStoreImpl { ) -> StorageResult { set_foyer_metrics_registry(GLOBAL_METRICS_REGISTRY.clone()); - let data_file_cache = if opts.data_file_cache_dir.is_empty() { - FileCache::none() + let (data_file_cache, recent_filter) = if opts.data_file_cache_dir.is_empty() { + (FileCache::none(), None) } else { const MB: usize = 1024 * 1024; - let foyer_store_config = FoyerStoreConfig { + let config = FileCacheConfig { name: "data".to_string(), dir: PathBuf::from(opts.data_file_cache_dir.clone()), capacity: opts.data_file_cache_capacity_mb * MB, @@ -559,22 +542,27 @@ impl StateStoreImpl { device_io_size: opts.data_file_cache_device_io_size, lfu_window_to_cache_size_ratio: opts.data_file_cache_lfu_window_to_cache_size_ratio, lfu_tiny_lru_capacity_ratio: 
opts.data_file_cache_lfu_tiny_lru_capacity_ratio, - rated_random_rate: opts.data_file_cache_rated_random_rate_mb * MB, + insert_rate_limit: opts.data_file_cache_insert_rate_limit_mb * MB, flushers: opts.data_file_cache_flushers, reclaimers: opts.data_file_cache_reclaimers, flush_rate_limit: opts.data_file_cache_flush_rate_limit_mb * MB, reclaim_rate_limit: opts.data_file_cache_reclaim_rate_limit_mb * MB, recover_concurrency: opts.data_file_cache_recover_concurrency, - event_listener: vec![], - enable_filter: !opts.cache_refill_data_refill_levels.is_empty(), - }; - let config = FoyerRuntimeConfig { - foyer_store_config, - runtime_worker_threads: None, + allocator_bits: opts.data_file_cache_allocation_bits, + allocation_timeout: Duration::from_millis( + opts.data_file_cache_allocation_timeout_ms as u64, + ), + admissions: vec![], + reinsertions: vec![], }; - FileCache::foyer(config) + let cache = FileCache::open(config) .await - .map_err(HummockError::file_cache)? + .map_err(HummockError::file_cache)?; + let filter = Some(Arc::new(RecentFilter::new( + opts.cache_refill_recent_filter_layers, + Duration::from_millis(opts.cache_refill_recent_filter_rotate_interval_ms as u64), + ))); + (cache, filter) }; let meta_file_cache = if opts.meta_file_cache_dir.is_empty() { @@ -582,7 +570,7 @@ impl StateStoreImpl { } else { const MB: usize = 1024 * 1024; - let foyer_store_config = FoyerStoreConfig { + let config = FileCacheConfig { name: "meta".to_string(), dir: PathBuf::from(opts.meta_file_cache_dir.clone()), capacity: opts.meta_file_cache_capacity_mb * MB, @@ -592,20 +580,20 @@ impl StateStoreImpl { device_io_size: opts.meta_file_cache_device_io_size, lfu_window_to_cache_size_ratio: opts.meta_file_cache_lfu_window_to_cache_size_ratio, lfu_tiny_lru_capacity_ratio: opts.meta_file_cache_lfu_tiny_lru_capacity_ratio, - rated_random_rate: opts.meta_file_cache_rated_random_rate_mb * MB, + insert_rate_limit: opts.meta_file_cache_insert_rate_limit_mb * MB, flushers: 
opts.meta_file_cache_flushers, reclaimers: opts.meta_file_cache_reclaimers, flush_rate_limit: opts.meta_file_cache_flush_rate_limit_mb * MB, reclaim_rate_limit: opts.meta_file_cache_reclaim_rate_limit_mb * MB, recover_concurrency: opts.meta_file_cache_recover_concurrency, - event_listener: vec![], - enable_filter: false, - }; - let config = FoyerRuntimeConfig { - foyer_store_config, - runtime_worker_threads: None, + allocator_bits: opts.meta_file_cache_allocation_bits, + allocation_timeout: Duration::from_millis( + opts.meta_file_cache_allocation_timeout_ms as u64, + ), + admissions: vec![], + reinsertions: vec![], }; - FileCache::foyer(config) + FileCache::open(config) .await .map_err(HummockError::file_cache)? }; @@ -633,6 +621,7 @@ impl StateStoreImpl { opts.high_priority_ratio, data_file_cache, meta_file_cache, + recent_filter, )); let notification_client = RpcNotificationClient::new(hummock_meta_client.get_inner().clone()); @@ -671,60 +660,24 @@ impl StateStoreImpl { } } -/// This trait is for aligning some common methods of `state_store_impl` for external use -pub trait HummockTrait { - fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef; - fn sstable_store(&self) -> SstableStoreRef; - fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManager; - fn get_memory_limiter(&self) -> Arc; - fn backup_reader(&self) -> BackupReaderRef; +pub trait AsHummock { fn as_hummock(&self) -> Option<&HummockStorage>; } -impl HummockTrait for HummockStorage { - fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef { - self.sstable_object_id_manager() - } - - fn sstable_store(&self) -> SstableStoreRef { - self.sstable_store() - } - - fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManager { - self.filter_key_extractor_manager() - } - - fn get_memory_limiter(&self) -> Arc { - self.get_memory_limiter() - } - - fn backup_reader(&self) -> BackupReaderRef { - self.backup_reader() - } - +impl AsHummock for HummockStorage { fn 
as_hummock(&self) -> Option<&HummockStorage> { Some(self) } } -pub trait AsHummockTrait { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait>; -} - -impl AsHummockTrait for HummockStorage { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { - Some(self) - } -} - -impl AsHummockTrait for MemoryStateStore { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { +impl AsHummock for MemoryStateStore { + fn as_hummock(&self) -> Option<&HummockStorage> { None } } -impl AsHummockTrait for SledStateStore { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { +impl AsHummock for SledStateStore { + fn as_hummock(&self) -> Option<&HummockStorage> { None } } @@ -735,13 +688,16 @@ pub mod boxed_state_store { use std::ops::{Bound, Deref, DerefMut}; use bytes::Bytes; + use dyn_clone::{clone_trait_object, DynClone}; use futures::stream::BoxStream; use futures::StreamExt; + use risingwave_hummock_sdk::key::{TableKey, TableKeyRange}; use risingwave_hummock_sdk::HummockReadEpoch; use crate::error::StorageResult; + use crate::hummock::HummockStorage; use crate::store::*; - use crate::store_impl::{AsHummockTrait, HummockTrait}; + use crate::store_impl::AsHummock; use crate::StateStore; // For StateStoreRead @@ -752,14 +708,14 @@ pub mod boxed_state_store { pub trait DynamicDispatchedStateStoreRead: StaticSendSync { async fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> StorageResult>; async fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> StorageResult; @@ -769,7 +725,7 @@ pub mod boxed_state_store { impl DynamicDispatchedStateStoreRead for S { async fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> StorageResult> { @@ -778,7 +734,7 @@ pub mod boxed_state_store { async fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> StorageResult { @@ -792,26 
+748,30 @@ pub mod boxed_state_store { pub trait DynamicDispatchedLocalStateStore: StaticSendSync { async fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult; - async fn get(&self, key: Bytes, read_options: ReadOptions) -> StorageResult>; + async fn get( + &self, + key: TableKey, + read_options: ReadOptions, + ) -> StorageResult>; async fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult>; fn insert( &mut self, - key: Bytes, + key: TableKey, new_val: Bytes, old_val: Option, ) -> StorageResult<()>; - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()>; + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()>; async fn flush( &mut self, @@ -831,19 +791,23 @@ pub mod boxed_state_store { impl DynamicDispatchedLocalStateStore for S { async fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult { self.may_exist(key_range, read_options).await } - async fn get(&self, key: Bytes, read_options: ReadOptions) -> StorageResult> { + async fn get( + &self, + key: TableKey, + read_options: ReadOptions, + ) -> StorageResult> { self.get(key, read_options).await } async fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult> { Ok(self.iter(key_range, read_options).await?.boxed()) @@ -851,14 +815,14 @@ pub mod boxed_state_store { fn insert( &mut self, - key: Bytes, + key: TableKey, new_val: Bytes, old_val: Option, ) -> StorageResult<()> { self.insert(key, new_val, old_val) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { self.delete(key, old_val) } @@ -893,7 +857,7 @@ pub mod boxed_state_store { fn may_exist( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: 
ReadOptions, ) -> impl Future> + Send + '_ { self.deref().may_exist(key_range, read_options) @@ -901,7 +865,7 @@ pub mod boxed_state_store { fn get( &self, - key: Bytes, + key: TableKey, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { self.deref().get(key, read_options) @@ -909,7 +873,7 @@ pub mod boxed_state_store { fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { self.deref().iter(key_range, read_options) @@ -917,14 +881,14 @@ pub mod boxed_state_store { fn insert( &mut self, - key: Bytes, + key: TableKey, new_val: Bytes, old_val: Option, ) -> StorageResult<()> { self.deref_mut().insert(key, new_val, old_val) } - fn delete(&mut self, key: Bytes, old_val: Bytes) -> StorageResult<()> { + fn delete(&mut self, key: TableKey, old_val: Bytes) -> StorageResult<()> { self.deref_mut().delete(key, old_val) } @@ -1006,7 +970,7 @@ pub mod boxed_state_store { fn get( &self, - key: Bytes, + key: TableKey, epoch: u64, read_options: ReadOptions, ) -> impl Future>> + Send + '_ { @@ -1015,7 +979,7 @@ pub mod boxed_state_store { fn iter( &self, - key_range: IterKeyRange, + key_range: TableKeyRange, epoch: u64, read_options: ReadOptions, ) -> impl Future> + '_ { @@ -1023,44 +987,23 @@ pub mod boxed_state_store { } } - // With this trait, we can implement `Clone` for BoxDynamicDispatchedStateStore - pub trait DynamicDispatchedStateStoreCloneBox { - fn clone_box(&self) -> BoxDynamicDispatchedStateStore; - } - pub trait DynamicDispatchedStateStore: - DynamicDispatchedStateStoreCloneBox - + DynamicDispatchedStateStoreRead - + DynamicDispatchedStateStoreExt - + AsHummockTrait - { - } - - impl< - S: DynamicDispatchedStateStoreCloneBox - + DynamicDispatchedStateStoreRead - + DynamicDispatchedStateStoreExt - + AsHummockTrait, - > DynamicDispatchedStateStore for S + DynClone + DynamicDispatchedStateStoreRead + DynamicDispatchedStateStoreExt + AsHummock { } - impl DynamicDispatchedStateStoreCloneBox 
for S { - fn clone_box(&self) -> BoxDynamicDispatchedStateStore { - Box::new(self.clone()) - } - } + clone_trait_object!(DynamicDispatchedStateStore); - impl AsHummockTrait for BoxDynamicDispatchedStateStore { - fn as_hummock_trait(&self) -> Option<&dyn HummockTrait> { - self.deref().as_hummock_trait() + impl AsHummock for BoxDynamicDispatchedStateStore { + fn as_hummock(&self) -> Option<&HummockStorage> { + self.deref().as_hummock() } } - impl Clone for BoxDynamicDispatchedStateStore { - fn clone(&self) -> Self { - self.deref().clone_box() - } + impl< + S: DynClone + DynamicDispatchedStateStoreRead + DynamicDispatchedStateStoreExt + AsHummock, + > DynamicDispatchedStateStore for S + { } impl StateStore for BoxDynamicDispatchedStateStore { diff --git a/src/storage/src/table/batch_table/storage_table.rs b/src/storage/src/table/batch_table/storage_table.rs index dc386a719ec22..f784c4a5e7ca2 100644 --- a/src/storage/src/table/batch_table/storage_table.rs +++ b/src/storage/src/table/batch_table/storage_table.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::assert_matches::assert_matches; use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::ops::{Index, RangeBounds}; use std::sync::Arc; @@ -33,7 +32,9 @@ use risingwave_common::util::row_serde::*; use risingwave_common::util::sort_util::OrderType; use risingwave_common::util::value_encoding::column_aware_row_encoding::ColumnAwareSerde; use risingwave_common::util::value_encoding::{BasicSerde, EitherSerde}; -use risingwave_hummock_sdk::key::{end_bound_of_prefix, next_key, prefixed_range}; +use risingwave_hummock_sdk::key::{ + end_bound_of_prefix, map_table_key_range, next_key, prefixed_range, TableKeyRange, +}; use risingwave_hummock_sdk::HummockReadEpoch; use tracing::trace; @@ -44,7 +45,7 @@ use crate::row_serde::value_serde::{ValueRowSerde, ValueRowSerdeNew}; use crate::row_serde::{find_columns_by_ids, ColumnMapping}; use crate::store::{PrefetchOptions, ReadOptions}; use crate::table::merge_sort::merge_sort; -use crate::table::{compute_vnode, Distribution, KeyedRow, TableIter, DEFAULT_VNODE}; +use crate::table::{compute_vnode, Distribution, KeyedRow, TableIter}; use crate::StateStore; /// [`StorageTableInner`] is the interface accessing relational data in KV(`StateStore`) with @@ -416,24 +417,7 @@ impl StorageTableInner { _ => CachePolicy::Fill(CachePriority::High), }; - let raw_key_ranges = if !ordered - && matches!(encoded_key_range.start_bound(), Unbounded) - && matches!(encoded_key_range.end_bound(), Unbounded) - { - // If the range is unbounded and order is not required, we can create a single iterator - // for each continuous vnode range. - - // In this case, the `vnode_hint` must be default for singletons and `None` for - // distributed tables. 
- assert_eq!(vnode_hint.unwrap_or(DEFAULT_VNODE), DEFAULT_VNODE); - - Either::Left(self.vnodes.vnode_ranges().map(|r| { - let start = Included(Bytes::copy_from_slice(&r.start().to_be_bytes()[..])); - let end = end_bound_of_prefix(&r.end().to_be_bytes()); - assert_matches!(end, Excluded(_) | Unbounded); - (start, end) - })) - } else { + let raw_key_ranges = { // Vnodes that are set and should be accessed. let vnodes = match vnode_hint { // If `vnode_hint` is set, we can only access this single vnode. @@ -441,13 +425,12 @@ impl StorageTableInner { // Otherwise, we need to access all vnodes of this table. None => Either::Right(self.vnodes.iter_vnodes()), }; - Either::Right( - vnodes.map(|vnode| prefixed_range(encoded_key_range.clone(), &vnode.to_be_bytes())), - ) + vnodes.map(|vnode| prefixed_range(encoded_key_range.clone(), &vnode.to_be_bytes())) }; // For each key range, construct an iterator. let iterators: Vec<_> = try_join_all(raw_key_ranges.map(|raw_key_range| { + let table_key_range = map_table_key_range(raw_key_range); let prefix_hint = prefix_hint.clone(); let read_backup = matches!(wait_epoch, HummockReadEpoch::Backup(_)); async move { @@ -473,7 +456,7 @@ impl StorageTableInner { self.value_output_indices.clone(), self.output_row_in_key_indices.clone(), self.row_serde.clone(), - raw_key_range, + table_key_range, read_options, wait_epoch, ) @@ -490,7 +473,10 @@ impl StorageTableInner { 0 => unreachable!(), 1 => iterators.into_iter().next().unwrap(), // Concat all iterators if not to preserve order. - _ if !ordered => futures::stream::iter(iterators).flatten(), + _ if !ordered => { + futures::stream::iter(iterators.into_iter().map(Box::pin).collect_vec()) + .flatten_unordered(1024) + } // Merge all iterators if to preserve order. 
_ => merge_sort(iterators.into_iter().map(Box::pin).collect()), }; @@ -680,13 +666,13 @@ impl StorageTableInnerIterInner { value_output_indices: Vec, output_row_in_key_indices: Vec, row_deserializer: Arc, - raw_key_range: (Bound, Bound), + table_key_range: TableKeyRange, read_options: ReadOptions, epoch: HummockReadEpoch, ) -> StorageResult { let raw_epoch = epoch.get_epoch(); store.try_wait_epoch(epoch).await?; - let iter = store.iter(raw_key_range, raw_epoch, read_options).await?; + let iter = store.iter(table_key_range, raw_epoch, read_options).await?; // For `HummockStorage`, a cluster recovery will clear storage data and make subsequent // `HummockReadEpoch::Current` read incomplete. // `validate_read_epoch` is a safeguard against that incorrect read. It rejects the read diff --git a/src/storage/src/table/mod.rs b/src/storage/src/table/mod.rs index aa876e2c6b88c..b6407528d5272 100644 --- a/src/storage/src/table/mod.rs +++ b/src/storage/src/table/mod.rs @@ -162,7 +162,7 @@ pub fn compute_vnode(row: impl Row, indices: &[usize], vnodes: &Bitmap) -> Virtu vnode }; - tracing::trace!(target: "events::storage::storage_table", "compute vnode: {:?} key {:?} => {}", row, indices, vnode); + tracing::debug!(target: "events::storage::storage_table", "compute vnode: {:?} key {:?} => {}", row, indices, vnode); vnode } @@ -184,7 +184,7 @@ pub fn compute_chunk_vnode( VirtualNode::compute_chunk(chunk, &dist_key_indices) .into_iter() - .zip_eq_fast(chunk.vis().iter()) + .zip_eq_fast(chunk.visibility().iter()) .map(|(vnode, vis)| { // Ignore the invisible rows. if vis { diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs deleted file mode 100644 index dc143f4a93855..0000000000000 --- a/src/storage/src/write_batch.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::ops::Bound; - -use bytes::Bytes; -use risingwave_hummock_sdk::key::next_key; - -use crate::error::StorageResult; -use crate::hummock::HummockError; -use crate::storage_value::StorageValue; -use crate::store::{StateStoreWrite, WriteOptions}; - -/// [`WriteBatch`] wraps a list of key-value pairs and an associated [`crate::StateStore`]. -pub struct WriteBatch<'a, S: StateStoreWrite> { - store: &'a S, - - batch: Vec<(Bytes, StorageValue)>, - - delete_ranges: Vec<(Bound, Bound)>, - - write_options: WriteOptions, -} - -impl<'a, S: StateStoreWrite> WriteBatch<'a, S> { - /// Constructs a new, empty [`WriteBatch`] with the given `store`. - pub fn new(store: &'a S, write_options: WriteOptions) -> Self { - Self { - store, - batch: vec![], - delete_ranges: vec![], - write_options, - } - } - - /// Constructs a new, empty [`WriteBatch`] with the given `store` and specified capacity. - pub fn with_capacity(store: &'a S, capacity: usize, write_options: WriteOptions) -> Self { - Self { - store, - batch: Vec::with_capacity(capacity), - delete_ranges: vec![], - write_options, - } - } - - /// Puts a value. - pub fn put(&mut self, key: impl AsRef<[u8]>, value: StorageValue) { - self.do_push(key.as_ref(), value); - } - - /// Deletes a value. - pub fn delete(&mut self, key: impl AsRef<[u8]>) { - self.do_push(key.as_ref(), StorageValue::new_delete()); - } - - /// Delete all keys starting with `prefix`. 
- pub fn delete_prefix(&mut self, prefix: impl AsRef<[u8]>) { - let start_key = Bytes::from(prefix.as_ref().to_owned()); - let end_key = Bytes::from(next_key(&start_key)); - self.delete_ranges - .push((Bound::Included(start_key), Bound::Excluded(end_key))); - } - - /// Delete all keys in this range. - pub fn delete_range(&mut self, start: Bound>, end: Bound>) { - self.delete_ranges.push(( - start.map(|start| Bytes::from(start.as_ref().to_owned())), - end.map(|end| Bytes::from(end.as_ref().to_owned())), - )); - } - - /// Reserves capacity for at least `additional` more key-value pairs to be inserted in the - /// batch. - pub fn reserve(&mut self, additional: usize) { - self.batch.reserve(additional); - } - - /// Returns the number of key-value pairs in the batch. - pub fn len(&self) -> usize { - self.batch.len() - } - - /// Preprocesses the batch to make it sorted. It returns `false` if duplicate keys are found. - fn preprocess(&mut self) -> StorageResult<()> { - let original_length = self.batch.len(); - self.batch.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); - self.batch.dedup_by(|(k1, _), (k2, _)| k1 == k2); - - if original_length == self.batch.len() { - Ok(()) - } else { - Err(HummockError::invalid_write_batch().into()) - } - } - - /// Returns `true` if the batch contains no key-value pairs. - pub fn is_empty(&self) -> bool { - self.batch.is_empty() && self.delete_ranges.is_empty() - } - - /// Ingests this batch into the associated state store. - pub async fn ingest(mut self) -> StorageResult<()> { - if !self.is_empty() { - self.preprocess()?; - self.store - .ingest_batch(self.batch, self.delete_ranges, self.write_options) - .await?; - } - Ok(()) - } - - /// Pushes `key` and `value` into the `WriteBatch`. 
- fn do_push(&mut self, key: &[u8], value: StorageValue) { - let key = Bytes::from(key.to_vec()); - self.batch.push((key, value)); - } -} - -#[cfg(test)] -mod tests { - use bytes::Bytes; - - use crate::memory::MemoryStateStore; - use crate::storage_value::StorageValue; - use crate::store::{StateStoreWrite, WriteOptions}; - - #[tokio::test] - async fn test_invalid_write_batch() { - let state_store = MemoryStateStore::new(); - let mut batch = state_store.start_write_batch(WriteOptions { - epoch: 1, - table_id: Default::default(), - }); - - batch.put(Bytes::from("aa"), StorageValue::new_put("444")); - batch.put(Bytes::from("cc"), StorageValue::new_put("444")); - batch.put(Bytes::from("bb"), StorageValue::new_put("444")); - batch.delete(Bytes::from("aa")); - - batch - .ingest() - .await - .expect_err("Should panic here because of duplicate key."); - } -} diff --git a/src/stream/Cargo.toml b/src/stream/Cargo.toml index 79db63474cfd4..9e9e77b92ceec 100644 --- a/src/stream/Cargo.toml +++ b/src/stream/Cargo.toml @@ -21,32 +21,31 @@ async-stream = "0.3" async-trait = "0.1" await-tree = { workspace = true } bytes = "1" -dyn-clone = "1" educe = "0.4" either = "1" enum-as-inner = "0.6" futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = { workspace = true } -governor = { version = "0.6", default-features = false, features = ["std", "dashmap", "jitter"] } +governor = { version = "0.6", default-features = false, features = [ + "std", + "dashmap", + "jitter", +] } hytra = "0.1.2" -iter-chunks = "0.1" itertools = "0.11" local_stats_alloc = { path = "../utils/local_stats_alloc" } lru = { git = "https://github.com/risingwavelabs/lru-rs.git", rev = "cb2d7c7" } maplit = "1.0.2" memcomparable = "0.2" -multimap = "0.8" -num-traits = "0.2" +multimap = "0.9" parking_lot = "0.12" -parse-display = "0.8" pin-project = "1" prometheus = { version = "0.13", features = ["process"] } -prost = "0.11" +prost = { workspace = true } rand = "0.8" 
risingwave_common = { workspace = true } risingwave_connector = { workspace = true } risingwave_expr = { workspace = true } -risingwave_frontend = { workspace = true } risingwave_hummock_sdk = { workspace = true } risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } @@ -54,7 +53,6 @@ risingwave_source = { workspace = true } risingwave_storage = { workspace = true } serde_json = "1" smallvec = "1" -spin = "0.9" static_assertions = "1" thiserror = "1" tokio = { version = "0.2", package = "madsim-tokio", features = [ @@ -81,6 +79,7 @@ workspace-hack = { path = "../workspace-hack" } assert_matches = "1" criterion = { workspace = true, features = ["async_tokio", "async"] } expect-test = "1" +risingwave_expr_impl = { workspace = true } risingwave_hummock_test = { path = "../storage/hummock_test", features = [ "test", ] } diff --git a/src/stream/benches/stream_hash_agg.rs b/src/stream/benches/stream_hash_agg.rs index a5392f011afbb..62c45421c7f60 100644 --- a/src/stream/benches/stream_hash_agg.rs +++ b/src/stream/benches/stream_hash_agg.rs @@ -20,7 +20,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::field_generator::VarcharProperty; use risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::DataType; -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_expr::expr::*; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::StateStore; diff --git a/src/stream/clippy.toml b/src/stream/clippy.toml index 6f4d9099676af..b7257c4acb98c 100644 --- a/src/stream/clippy.toml +++ b/src/stream/clippy.toml @@ -1,7 +1,10 @@ disallowed-methods = [ { path = "std::iter::Iterator::zip", reason = "Please use Itertools::zip_eq instead." }, - { path = "risingwave_expr::expr::Expression::eval", reason = "Please use InfallibleExpression::eval_infallible instead." 
}, - { path = "risingwave_expr::expr::Expression::eval_row", reason = "Please use InfallibleExpression::eval_row_infallible instead." }, + + { path = "risingwave_expr::expr::build_from_prost", reason = "Expressions in streaming must be in non-strict mode. Please use `build_non_strict_from_prost` instead." }, + { path = "risingwave_expr::expr::build_func", reason = "Expressions in streaming must be in non-strict mode. Please use `build_func_non_strict` instead." }, + { path = "risingwave_expr::expr::Expression::eval", reason = "Please use `NonStrictExpression::eval_infallible` instead." }, + { path = "risingwave_expr::expr::Expression::eval_row", reason = "Please use `NonStrictExpression::eval_row_infallible` instead." }, { path = "risingwave_common::error::internal_err", reason = "Please use per-crate error type instead." }, { path = "risingwave_common::error::internal_error", reason = "Please use per-crate error type instead." }, diff --git a/src/stream/src/cache/managed_lru.rs b/src/stream/src/cache/managed_lru.rs index ab9703a557531..0608e429f4bb7 100644 --- a/src/stream/src/cache/managed_lru.rs +++ b/src/stream/src/cache/managed_lru.rs @@ -17,7 +17,6 @@ use std::borrow::Borrow; use std::cmp::min; use std::hash::{BuildHasher, Hash}; use std::ops::{Deref, DerefMut}; -use std::ptr::NonNull; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; @@ -42,7 +41,7 @@ pub struct ManagedLruCache Drop for ManagedLruCache { }; if let Err(e) = info .metrics - .lru_evicted_watermark_time_diff_ms + .lru_evicted_watermark_time_ms .remove_label_values(&[&info.table_id, &info.actor_id, &info.desc]) { warn!( - "unable to remove lru_evicted_watermark_time_diff_ms of {} {} {}: {:?}", + "unable to remove lru_evicted_watermark_time_ms of {} {} {}: {:?}", info.table_id, info.actor_id, info.desc, e ); } @@ -95,22 +94,21 @@ impl Option> { - let v = self.inner.get_mut(k); - v.map(|inner| { - UnsafeMutGuard::new( - inner, - &mut self.kv_heap_size, - &mut 
self.last_reported_size_bytes, - &mut self.memory_usage_metrics, - ) - }) - } - pub fn get(&mut self, k: &Q) -> Option<&V> where KeyRef: Borrow, @@ -257,9 +243,8 @@ impl u64 { @@ -367,45 +352,3 @@ impl<'a, V: EstimateSize> DerefMut for MutGuard<'a, V> { self.inner } } - -pub struct UnsafeMutGuard { - inner: NonNull, - // The size of the original value - original_val_size: usize, - // The total size of a collection - total_size: NonNull, - last_reported_size_bytes: NonNull, - memory_usage_metrics: NonNull, -} - -impl UnsafeMutGuard { - pub fn new( - inner: &mut V, - total_size: &mut usize, - last_reported_size_bytes: &mut usize, - memory_usage_metrics: &mut IntGauge, - ) -> Self { - let original_val_size = inner.estimated_size(); - Self { - inner: inner.into(), - original_val_size, - total_size: total_size.into(), - last_reported_size_bytes: last_reported_size_bytes.into(), - memory_usage_metrics: memory_usage_metrics.into(), - } - } - - /// # Safety - /// - /// 1. Only 1 `MutGuard` should be held for each value. - /// 2. The returned `MutGuard` should not be moved to other threads. 
- pub unsafe fn as_mut_guard<'a>(&mut self) -> MutGuard<'a, V> { - MutGuard { - inner: self.inner.as_mut(), - original_val_size: self.original_val_size, - total_size: self.total_size.as_mut(), - last_reported_size_bytes: self.last_reported_size_bytes.as_mut(), - memory_usage_metrics: self.memory_usage_metrics.as_mut(), - } - } -} diff --git a/src/stream/src/common/builder.rs b/src/stream/src/common/builder.rs index 0945066592d83..947a79f3747c9 100644 --- a/src/stream/src/common/builder.rs +++ b/src/stream/src/common/builder.rs @@ -139,7 +139,6 @@ impl StreamChunkBuilder { Some(StreamChunk::new( std::mem::replace(&mut self.ops, Vec::with_capacity(self.capacity)), new_columns, - None, )) } } diff --git a/src/stream/src/common/log_store/kv_log_store/mod.rs b/src/stream/src/common/log_store/kv_log_store/mod.rs deleted file mode 100644 index f54ef44c49141..0000000000000 --- a/src/stream/src/common/log_store/kv_log_store/mod.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use risingwave_common::buffer::Bitmap; -use risingwave_common::catalog::{TableId, TableOption}; -use risingwave_pb::catalog::Table; -use risingwave_storage::store::NewLocalOptions; -use risingwave_storage::StateStore; - -use crate::common::log_store::kv_log_store::buffer::new_log_store_buffer; -use crate::common::log_store::kv_log_store::reader::KvLogStoreReader; -use crate::common::log_store::kv_log_store::serde::LogStoreRowSerde; -use crate::common::log_store::kv_log_store::writer::KvLogStoreWriter; -use crate::common::log_store::LogStoreFactory; - -mod buffer; -mod reader; -mod serde; -#[cfg(test)] -mod test_utils; -mod writer; - -type SeqIdType = i32; -type RowOpCodeType = i16; - -const FIRST_SEQ_ID: SeqIdType = 0; - -/// Readers truncate the offset at the granularity of epoch -type ReaderTruncationOffsetType = u64; - -pub struct KvLogStoreFactory { - state_store: S, - - table_catalog: Table, - - vnodes: Option>, - - max_stream_chunk_count: usize, -} - -impl KvLogStoreFactory { - pub fn new( - state_store: S, - table_catalog: Table, - vnodes: Option>, - max_stream_chunk_count: usize, - ) -> Self { - Self { - state_store, - table_catalog, - vnodes, - max_stream_chunk_count, - } - } -} - -impl LogStoreFactory for KvLogStoreFactory { - type Reader = KvLogStoreReader; - type Writer = KvLogStoreWriter; - - async fn build(self) -> (Self::Reader, Self::Writer) { - let table_id = TableId::new(self.table_catalog.id); - let serde = LogStoreRowSerde::new(&self.table_catalog, self.vnodes); - let local_state_store = self - .state_store - .new_local(NewLocalOptions { - table_id: TableId { - table_id: self.table_catalog.id, - }, - is_consistent_op: false, - table_option: TableOption { - retention_seconds: None, - }, - is_replicated: false, - }) - .await; - - let (tx, rx) = new_log_store_buffer(self.max_stream_chunk_count); - - let reader = KvLogStoreReader::new(table_id, self.state_store, serde.clone(), rx); - - let writer = 
KvLogStoreWriter::new(table_id, local_state_store, serde, tx); - - (reader, writer) - } -} - -#[cfg(test)] -mod tests { - use risingwave_common::util::epoch::EpochPair; - use risingwave_hummock_sdk::HummockReadEpoch; - use risingwave_hummock_test::test_utils::prepare_hummock_test_env; - use risingwave_storage::store::SyncResult; - use risingwave_storage::StateStore; - - use crate::common::log_store::kv_log_store::test_utils::{ - gen_stream_chunk, gen_test_log_store_table, - }; - use crate::common::log_store::kv_log_store::KvLogStoreFactory; - use crate::common::log_store::{LogReader, LogStoreFactory, LogStoreReadItem, LogWriter}; - - #[tokio::test] - async fn test_basic() { - for count in 0..20 { - test_basic_inner(count).await - } - } - - async fn test_basic_inner(max_stream_chunk_count: usize) { - let test_env = prepare_hummock_test_env().await; - - let table = gen_test_log_store_table(); - - test_env.register_table(table.clone()).await; - - let factory = KvLogStoreFactory::new( - test_env.storage.clone(), - table.clone(), - None, - max_stream_chunk_count, - ); - let (mut reader, mut writer) = factory.build().await; - - let stream_chunk1 = gen_stream_chunk(0); - let stream_chunk2 = gen_stream_chunk(10); - - let epoch1 = test_env - .storage - .get_pinned_version() - .version() - .max_committed_epoch - + 1; - writer - .init(EpochPair::new_test_epoch(epoch1)) - .await - .unwrap(); - writer.write_chunk(stream_chunk1.clone()).await.unwrap(); - let epoch2 = epoch1 + 1; - writer.flush_current_epoch(epoch2, false).await.unwrap(); - writer.write_chunk(stream_chunk2.clone()).await.unwrap(); - let epoch3 = epoch2 + 1; - writer.flush_current_epoch(epoch3, true).await.unwrap(); - - test_env.storage.seal_epoch(epoch1, false); - test_env.storage.seal_epoch(epoch2, true); - let sync_result: SyncResult = test_env.storage.sync(epoch2).await.unwrap(); - assert!(!sync_result.uncommitted_ssts.is_empty()); - - reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - 
(epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch1); - assert_eq!(stream_chunk1, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch1); - assert!(!is_checkpoint) - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch2); - assert_eq!(stream_chunk2, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch2); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - } - - #[tokio::test] - async fn test_recovery() { - for count in 0..20 { - test_recovery_inner(count).await - } - } - - async fn test_recovery_inner(max_stream_chunk_count: usize) { - let test_env = prepare_hummock_test_env().await; - - let table = gen_test_log_store_table(); - - test_env.register_table(table.clone()).await; - - let factory = KvLogStoreFactory::new( - test_env.storage.clone(), - table.clone(), - None, - max_stream_chunk_count, - ); - let (mut reader, mut writer) = factory.build().await; - - let stream_chunk1 = gen_stream_chunk(0); - let stream_chunk2 = gen_stream_chunk(10); - - let epoch1 = test_env - .storage - .get_pinned_version() - .version() - .max_committed_epoch - + 1; - writer - .init(EpochPair::new_test_epoch(epoch1)) - .await - .unwrap(); - writer.write_chunk(stream_chunk1.clone()).await.unwrap(); - let epoch2 = epoch1 + 1; - writer.flush_current_epoch(epoch2, false).await.unwrap(); - writer.write_chunk(stream_chunk2.clone()).await.unwrap(); - let epoch3 = epoch2 + 1; - writer.flush_current_epoch(epoch3, true).await.unwrap(); - - test_env.storage.seal_epoch(epoch1, false); - - reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - (epoch, 
LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch1); - assert_eq!(stream_chunk1, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch1); - assert!(!is_checkpoint) - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch2); - assert_eq!(stream_chunk2, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch2); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - - test_env.commit_epoch(epoch2).await; - // The truncate does not work because it is after the sync - reader.truncate().await.unwrap(); - test_env - .storage - .try_wait_epoch(HummockReadEpoch::Committed(epoch2)) - .await - .unwrap(); - - // Recovery - test_env.storage.clear_shared_buffer().await.unwrap(); - - // Rebuild log reader and writer in recovery - let factory = KvLogStoreFactory::new( - test_env.storage.clone(), - table.clone(), - None, - max_stream_chunk_count, - ); - let (mut reader, mut writer) = factory.build().await; - writer - .init(EpochPair::new_test_epoch(epoch3)) - .await - .unwrap(); - reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch1); - assert_eq!(stream_chunk1, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch1); - assert!(!is_checkpoint) - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch2); - assert_eq!(stream_chunk2, read_stream_chunk); - 
} - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch2); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - } - - #[tokio::test] - async fn test_truncate() { - for count in 0..20 { - test_truncate_inner(count).await - } - } - - async fn test_truncate_inner(max_stream_chunk_count: usize) { - let test_env = prepare_hummock_test_env().await; - - let table = gen_test_log_store_table(); - - test_env.register_table(table.clone()).await; - - let factory = KvLogStoreFactory::new( - test_env.storage.clone(), - table.clone(), - None, - max_stream_chunk_count, - ); - let (mut reader, mut writer) = factory.build().await; - - let stream_chunk1 = gen_stream_chunk(0); - let stream_chunk2 = gen_stream_chunk(10); - - let epoch1 = test_env - .storage - .get_pinned_version() - .version() - .max_committed_epoch - + 1; - writer - .init(EpochPair::new_test_epoch(epoch1)) - .await - .unwrap(); - writer.write_chunk(stream_chunk1.clone()).await.unwrap(); - let epoch2 = epoch1 + 1; - writer.flush_current_epoch(epoch2, true).await.unwrap(); - writer.write_chunk(stream_chunk2.clone()).await.unwrap(); - - test_env.commit_epoch(epoch1).await; - - reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch1); - assert_eq!(stream_chunk1, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch1); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - - // The truncate should work because it is before the flush - reader.truncate().await.unwrap(); - let epoch3 = epoch2 + 1; - writer.flush_current_epoch(epoch3, true).await.unwrap(); - - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, 
epoch2); - assert_eq!(stream_chunk2, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch2); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - - // Truncation on epoch1 should work because it is before this sync - test_env.commit_epoch(epoch2).await; - test_env - .storage - .try_wait_epoch(HummockReadEpoch::Committed(epoch2)) - .await - .unwrap(); - - // Recovery - test_env.storage.clear_shared_buffer().await.unwrap(); - - // Rebuild log reader and writer in recovery - let factory = KvLogStoreFactory::new( - test_env.storage.clone(), - table.clone(), - None, - max_stream_chunk_count, - ); - let (mut reader, mut writer) = factory.build().await; - - writer - .init(EpochPair::new_test_epoch(epoch3)) - .await - .unwrap(); - let stream_chunk3 = gen_stream_chunk(20); - writer.write_chunk(stream_chunk3.clone()).await.unwrap(); - - reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch2); - assert_eq!(stream_chunk2, read_stream_chunk); - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { - assert_eq!(epoch, epoch2); - assert!(is_checkpoint) - } - _ => unreachable!(), - } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(read_stream_chunk)) => { - assert_eq!(epoch, epoch3); - assert_eq!(stream_chunk3, read_stream_chunk); - } - _ => unreachable!(), - } - } -} diff --git a/src/stream/src/common/log_store/kv_log_store/reader.rs b/src/stream/src/common/log_store/kv_log_store/reader.rs deleted file mode 100644 index f31235c42da0b..0000000000000 --- a/src/stream/src/common/log_store/kv_log_store/reader.rs +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::ops::Bound::{Excluded, Included}; -use std::pin::Pin; - -use bytes::Bytes; -use futures::future::try_join_all; -use futures::stream::select_all; -use risingwave_common::cache::CachePriority; -use risingwave_common::catalog::TableId; -use risingwave_common::hash::VnodeBitmapExt; -use risingwave_storage::hummock::CachePolicy; -use risingwave_storage::store::{PrefetchOptions, ReadOptions}; -use risingwave_storage::StateStore; -use tokio_stream::StreamExt; - -use crate::common::log_store::kv_log_store::buffer::{LogStoreBufferItem, LogStoreBufferReceiver}; -use crate::common::log_store::kv_log_store::serde::{ - new_log_store_item_stream, LogStoreItemStream, LogStoreRowSerde, -}; -use crate::common::log_store::{LogReader, LogStoreError, LogStoreReadItem, LogStoreResult}; - -enum ReaderState { - /// No data has been read yet - Uninitialized, - /// Consuming data previously written to state store before the reader was initialized. - ConsumingStateStore { - first_write_epoch: u64, - state_store_stream: Pin>>, - }, - /// Consuming newly written data after the reader was initialized. 
- ConsumingStream { epoch: u64 }, -} - -pub struct KvLogStoreReader { - table_id: TableId, - - state_store: S, - - serde: LogStoreRowSerde, - - rx: LogStoreBufferReceiver, - - reader_state: ReaderState, -} - -impl KvLogStoreReader { - pub(crate) fn new( - table_id: TableId, - state_store: S, - serde: LogStoreRowSerde, - rx: LogStoreBufferReceiver, - ) -> Self { - Self { - table_id, - state_store, - reader_state: ReaderState::Uninitialized, - serde, - rx, - } - } -} - -impl LogReader for KvLogStoreReader { - async fn init(&mut self) -> LogStoreResult<()> { - let first_write_epoch = self.rx.init().await; - let streams = try_join_all(self.serde.vnodes().iter_vnodes().map(|vnode| { - let range_start = Bytes::from(Vec::from(vnode.to_be_bytes())); - let range_end = self.serde.serialize_epoch(vnode, first_write_epoch); - let table_id = self.table_id; - let state_store = self.state_store.clone(); - async move { - state_store - .iter( - (Included(range_start), Excluded(range_end)), - u64::MAX, - ReadOptions { - prefetch_options: PrefetchOptions::new_for_exhaust_iter(), - cache_policy: CachePolicy::Fill(CachePriority::Low), - table_id, - ..Default::default() - }, - ) - .await - } - })) - .await?; - // TODO: set chunk size by config - let state_store_stream = - Box::pin(new_log_store_item_stream(streams, self.serde.clone(), 1024)); - self.reader_state = ReaderState::ConsumingStateStore { - first_write_epoch, - state_store_stream, - }; - Ok(()) - } - - async fn next_item(&mut self) -> LogStoreResult<(u64, LogStoreReadItem)> { - let epoch = match &mut self.reader_state { - ReaderState::Uninitialized => unreachable!("should be initialized"), - ReaderState::ConsumingStateStore { - first_write_epoch, - state_store_stream, - } => { - match state_store_stream.try_next().await? 
{ - Some((epoch, item)) => { - return Ok((epoch, item)); - } - None => { - let first_write_epoch = *first_write_epoch; - // all consumed - self.reader_state = ReaderState::ConsumingStream { - epoch: first_write_epoch, - }; - first_write_epoch - } - } - } - ReaderState::ConsumingStream { epoch } => *epoch, - }; - let (item_epoch, item) = self.rx.next_item().await; - assert_eq!(epoch, item_epoch); - Ok(match item { - LogStoreBufferItem::StreamChunk { chunk, .. } => { - (epoch, LogStoreReadItem::StreamChunk(chunk)) - } - LogStoreBufferItem::Flushed { - vnode_bitmap, - start_seq_id, - end_seq_id, - } => { - let streams = try_join_all(vnode_bitmap.iter_vnodes().map(|vnode| { - let range_start = self - .serde - .serialize_log_store_pk(vnode, epoch, start_seq_id); - let range_end = self.serde.serialize_log_store_pk(vnode, epoch, end_seq_id); - let state_store = self.state_store.clone(); - let table_id = self.table_id; - // Use u64::MAX here because the epoch to consume may be below the safe - // epoch - async move { - Ok::<_, LogStoreError>(Box::pin( - state_store - .iter( - (Included(range_start), Included(range_end)), - u64::MAX, - ReadOptions { - prefetch_options: PrefetchOptions::new_for_exhaust_iter(), - cache_policy: CachePolicy::Fill(CachePriority::Low), - table_id, - ..Default::default() - }, - ) - .await?, - )) - } - })) - .await?; - let combined_stream = select_all(streams); - let stream_chunk = self - .serde - .deserialize_stream_chunk(combined_stream, start_seq_id, end_seq_id, epoch) - .await?; - (epoch, LogStoreReadItem::StreamChunk(stream_chunk)) - } - LogStoreBufferItem::Barrier { - is_checkpoint, - next_epoch, - } => { - assert!( - epoch < next_epoch, - "next epoch {} should be greater than current epoch {}", - next_epoch, - epoch - ); - self.reader_state = ReaderState::ConsumingStream { epoch: next_epoch }; - (epoch, LogStoreReadItem::Barrier { is_checkpoint }) - } - LogStoreBufferItem::UpdateVnodes(bitmap) => { - 
self.serde.update_vnode_bitmap(bitmap.clone()); - (epoch, LogStoreReadItem::UpdateVnodeBitmap(bitmap)) - } - }) - } - - async fn truncate(&mut self) -> LogStoreResult<()> { - self.rx.truncate(); - Ok(()) - } -} diff --git a/src/stream/src/common/log_store/kv_log_store/test_utils.rs b/src/stream/src/common/log_store/kv_log_store/test_utils.rs deleted file mode 100644 index 8eb3a82fb742d..0000000000000 --- a/src/stream/src/common/log_store/kv_log_store/test_utils.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use itertools::Itertools; -use risingwave_common::array::{Op, StreamChunk}; -use risingwave_common::catalog::{ColumnDesc, ColumnId, TableId}; -use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, ScalarImpl, ScalarRef}; -use risingwave_common::util::chunk_coalesce::DataChunkBuilder; -use risingwave_common::util::sort_util::OrderType; -use risingwave_pb::catalog::PbTable; - -use crate::common::table::test_utils::gen_prost_table; - -pub(crate) const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; - -pub(crate) fn gen_test_data(base: i64) -> (Vec, Vec) { - let ops = vec![Op::Insert, Op::Delete, Op::UpdateDelete, Op::UpdateInsert]; - let rows = vec![ - OwnedRow::new(vec![ - Some(ScalarImpl::Int64(1 + base)), - Some(ScalarImpl::Utf8("name1".to_owned_scalar())), - ]), - OwnedRow::new(vec![ - Some(ScalarImpl::Int64(2 + base)), - Some(ScalarImpl::Utf8("name2".to_owned_scalar())), - ]), - OwnedRow::new(vec![ - Some(ScalarImpl::Int64(3 + base)), - Some(ScalarImpl::Utf8("name3".to_owned_scalar())), - ]), - OwnedRow::new(vec![ - Some(ScalarImpl::Int64(3 + base)), - Some(ScalarImpl::Utf8("name4".to_owned_scalar())), - ]), - ]; - (ops, rows) -} - -pub(crate) fn test_payload_schema() -> Vec { - vec![ - ColumnDesc::unnamed(ColumnId::from(3), DataType::Int64), // id - ColumnDesc::unnamed(ColumnId::from(2), DataType::Varchar), // name - ] -} - -pub(crate) fn test_log_store_table_schema() -> Vec { - let mut column_descs = vec![ - ColumnDesc::unnamed(ColumnId::from(0), DataType::Int64), // epoch - ColumnDesc::unnamed(ColumnId::from(1), DataType::Int32), // Seq id - ColumnDesc::unnamed(ColumnId::from(2), DataType::Int16), // op code - ]; - column_descs.extend(test_payload_schema()); - column_descs -} - -pub(crate) fn gen_stream_chunk(base: i64) -> StreamChunk { - let (ops, rows) = gen_test_data(base); - let mut builder = DataChunkBuilder::new( - test_payload_schema() - .iter() - .map(|col| col.data_type.clone()) - .collect_vec(), - 1000000, - ); - for 
row in &rows { - assert!(builder.append_one_row(row).is_none()); - } - let data_chunk = builder.consume_all().unwrap(); - StreamChunk::from_parts(ops, data_chunk) -} - -pub(crate) fn gen_test_log_store_table() -> PbTable { - let schema = test_log_store_table_schema(); - let order_types = vec![OrderType::ascending(), OrderType::ascending_nulls_last()]; - let pk_index = vec![0_usize, 1_usize]; - let read_prefix_len_hint = 0; - gen_prost_table( - TEST_TABLE_ID, - schema, - order_types, - pk_index, - read_prefix_len_hint, - ) -} diff --git a/src/stream/src/common/log_store/mod.rs b/src/stream/src/common/log_store/mod.rs deleted file mode 100644 index f343cfdfc8f03..0000000000000 --- a/src/stream/src/common/log_store/mod.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod in_mem; -pub mod kv_log_store; - -use std::fmt::Debug; -use std::future::Future; -use std::sync::Arc; - -use risingwave_common::array::StreamChunk; -use risingwave_common::buffer::Bitmap; -use risingwave_common::util::epoch::EpochPair; -use risingwave_common::util::value_encoding::error::ValueEncodingError; -use risingwave_storage::error::StorageError; - -#[derive(thiserror::Error, Debug)] -pub enum LogStoreError { - #[error("EndOfLogStream")] - EndOfLogStream, - - #[error("Storage error: {0}")] - StorageError(#[from] StorageError), - - #[error(transparent)] - Internal(#[from] anyhow::Error), - - #[error("Value encoding error: {0}")] - ValueEncoding(#[from] ValueEncodingError), -} - -pub type LogStoreResult = Result; - -#[derive(Debug)] -pub enum LogStoreReadItem { - StreamChunk(StreamChunk), - Barrier { is_checkpoint: bool }, - UpdateVnodeBitmap(Arc), -} - -pub trait LogWriter { - /// Initialize the log writer with an epoch - fn init(&mut self, epoch: EpochPair) -> impl Future> + Send + '_; - - /// Write a stream chunk to the log writer - fn write_chunk( - &mut self, - chunk: StreamChunk, - ) -> impl Future> + Send + '_; - - /// Mark current epoch as finished and sealed, and flush the unconsumed log data. - fn flush_current_epoch( - &mut self, - next_epoch: u64, - is_checkpoint: bool, - ) -> impl Future> + Send + '_; - - /// Update the vnode bitmap of the log writer - fn update_vnode_bitmap( - &mut self, - new_vnodes: Arc, - ) -> impl Future> + Send + '_; -} - -pub trait LogReader { - /// Initialize the log reader. Usually function as waiting for log writer to be initialized. - fn init(&mut self) -> impl Future> + Send + '_; - - /// Emit the next item. - fn next_item( - &mut self, - ) -> impl Future> + Send + '_; - - /// Mark that all items emitted so far have been consumed and it is safe to truncate the log - /// from the current offset. 
- fn truncate(&mut self) -> impl Future> + Send + '_; -} - -pub trait LogStoreFactory: 'static { - type Reader: LogReader + Send + 'static; - type Writer: LogWriter + Send + 'static; - - fn build(self) -> impl Future + Send; -} diff --git a/src/stream/src/common/log_store/in_mem.rs b/src/stream/src/common/log_store_impl/in_mem.rs similarity index 65% rename from src/stream/src/common/log_store/in_mem.rs rename to src/stream/src/common/log_store_impl/in_mem.rs index 7d136a64a8257..35040be82c93b 100644 --- a/src/stream/src/common/log_store/in_mem.rs +++ b/src/stream/src/common/log_store_impl/in_mem.rs @@ -18,15 +18,15 @@ use anyhow::anyhow; use risingwave_common::array::StreamChunk; use risingwave_common::buffer::Bitmap; use risingwave_common::util::epoch::{EpochPair, INVALID_EPOCH}; +use risingwave_connector::sink::log_store::{ + LogReader, LogStoreFactory, LogStoreReadItem, LogStoreResult, LogWriter, TruncateOffset, +}; use tokio::sync::mpsc::{ channel, unbounded_channel, Receiver, Sender, UnboundedReceiver, UnboundedSender, }; use tokio::sync::oneshot; -use crate::common::log_store::in_mem::LogReaderEpochProgress::{AwaitingTruncate, Consuming}; -use crate::common::log_store::{ - LogReader, LogStoreError, LogStoreFactory, LogStoreReadItem, LogStoreResult, LogWriter, -}; +use crate::common::log_store_impl::in_mem::LogReaderEpochProgress::{AwaitingTruncate, Consuming}; enum InMemLogStoreItem { StreamChunk(StreamChunk), @@ -79,6 +79,12 @@ pub struct BoundedInMemLogStoreReader { /// Sender of consumed epoch to the log writer truncated_epoch_tx: UnboundedSender, + + /// Offset of the latest emitted item + latest_offset: TruncateOffset, + + /// Offset of the latest truncated item + truncate_offset: TruncateOffset, } pub struct BoundedInMemLogStoreFactory { @@ -104,6 +110,8 @@ impl LogStoreFactory for BoundedInMemLogStoreFactory { init_epoch_rx: Some(init_epoch_rx), item_rx, truncated_epoch_tx, + latest_offset: TruncateOffset::Barrier { epoch: 0 }, + truncate_offset: 
TruncateOffset::Barrier { epoch: 0 }, }; let writer = BoundedInMemLogStoreWriter { curr_epoch: None, @@ -126,6 +134,8 @@ impl LogReader for BoundedInMemLogStoreReader { .map_err(|e| anyhow!("unable to get init epoch: {:?}", e))?; assert_eq!(self.epoch_progress, UNINITIALIZED); self.epoch_progress = LogReaderEpochProgress::Consuming(epoch); + self.latest_offset = TruncateOffset::Barrier { epoch: epoch - 1 }; + self.truncate_offset = TruncateOffset::Barrier { epoch: epoch - 1 }; Ok(()) } @@ -134,7 +144,29 @@ impl LogReader for BoundedInMemLogStoreReader { Some(item) => match self.epoch_progress { Consuming(current_epoch) => match item { InMemLogStoreItem::StreamChunk(chunk) => { - Ok((current_epoch, LogStoreReadItem::StreamChunk(chunk))) + let chunk_id = match self.latest_offset { + TruncateOffset::Chunk { epoch, chunk_id } => { + assert_eq!(epoch, current_epoch); + chunk_id + 1 + } + TruncateOffset::Barrier { epoch } => { + assert!( + epoch < current_epoch, + "prev offset at barrier {} but current epoch {}", + epoch, + current_epoch + ); + 0 + } + }; + self.latest_offset = TruncateOffset::Chunk { + epoch: current_epoch, + chunk_id, + }; + Ok(( + current_epoch, + LogStoreReadItem::StreamChunk { chunk, chunk_id }, + )) } InMemLogStoreItem::Barrier { is_checkpoint, @@ -148,6 +180,9 @@ impl LogReader for BoundedInMemLogStoreReader { } else { self.epoch_progress = Consuming(next_epoch); } + self.latest_offset = TruncateOffset::Barrier { + epoch: current_epoch, + }; Ok((current_epoch, LogStoreReadItem::Barrier { is_checkpoint })) } InMemLogStoreItem::UpdateVnodeBitmap(vnode_bitmap) => Ok(( @@ -155,28 +190,47 @@ impl LogReader for BoundedInMemLogStoreReader { LogStoreReadItem::UpdateVnodeBitmap(vnode_bitmap), )), }, - AwaitingTruncate { .. } => { - unreachable!("should not be awaiting for when barrier comes") - } + AwaitingTruncate { .. 
} => Err(anyhow!( + "should not call next_item on checkpoint barrier for in-mem log store" + )), }, - None => Err(LogStoreError::EndOfLogStream), + None => Err(anyhow!("end of log stream")), } } - async fn truncate(&mut self) -> LogStoreResult<()> { - let sealed_epoch = match self.epoch_progress { - Consuming(_) => unreachable!("should be awaiting truncate"), - AwaitingTruncate { - sealed_epoch, - next_epoch, - } => { - self.epoch_progress = Consuming(next_epoch); - sealed_epoch + async fn truncate(&mut self, offset: TruncateOffset) -> LogStoreResult<()> { + // check the truncate offset is higher than prev truncate offset + if self.truncate_offset >= offset { + return Err(anyhow!( + "truncate offset {:?} but prev truncate offset is {:?}", + offset, + self.truncate_offset + )); + } + + // check the truncate offset does not exceed the latest possible offset + if offset > self.latest_offset { + return Err(anyhow!( + "truncate at {:?} but latest offset is {:?}", + offset, + self.latest_offset + )); + } + + if let AwaitingTruncate { + sealed_epoch, + next_epoch, + } = &self.epoch_progress + { + if let TruncateOffset::Barrier {epoch} = offset && epoch == *sealed_epoch { + let sealed_epoch = *sealed_epoch; + self.epoch_progress = Consuming(*next_epoch); + self.truncated_epoch_tx + .send(sealed_epoch) + .map_err(|_| anyhow!("unable to send sealed epoch"))?; } - }; - self.truncated_epoch_tx - .send(sealed_epoch) - .map_err(|_| anyhow!("unable to send sealed epoch"))?; + } + self.truncate_offset = offset; Ok(()) } } @@ -230,22 +284,27 @@ impl LogWriter for BoundedInMemLogStoreWriter { } async fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> LogStoreResult<()> { - Ok(self - .item_tx + self.item_tx .send(InMemLogStoreItem::UpdateVnodeBitmap(new_vnodes)) .await - .map_err(|_| anyhow!("unable to send vnode bitmap"))?) 
+ .map_err(|_| anyhow!("unable to send vnode bitmap")) } } #[cfg(test)] mod tests { + use std::future::poll_fn; + use std::task::Poll; + + use futures::FutureExt; use risingwave_common::array::Op; use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::util::epoch::EpochPair; + use risingwave_connector::sink::log_store::{ + LogReader, LogStoreFactory, LogStoreReadItem, LogWriter, TruncateOffset, + }; - use crate::common::log_store::in_mem::BoundedInMemLogStoreFactory; - use crate::common::log_store::{LogReader, LogStoreFactory, LogStoreReadItem, LogWriter}; + use crate::common::log_store_impl::in_mem::BoundedInMemLogStoreFactory; use crate::common::StreamChunkBuilder; #[tokio::test] @@ -273,7 +332,7 @@ mod tests { let stream_chunk = builder.take().unwrap(); let stream_chunk_clone = stream_chunk.clone(); - let join_handle = tokio::spawn(async move { + let mut join_handle = tokio::spawn(async move { writer .init(EpochPair::new_test_epoch(init_epoch)) .await @@ -282,19 +341,33 @@ mod tests { .write_chunk(stream_chunk_clone.clone()) .await .unwrap(); + writer + .write_chunk(stream_chunk_clone.clone()) + .await + .unwrap(); writer.flush_current_epoch(epoch1, false).await.unwrap(); writer.write_chunk(stream_chunk_clone).await.unwrap(); writer.flush_current_epoch(epoch2, true).await.unwrap(); }); reader.init().await.unwrap(); - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(chunk)) => { + let _chunk_id1_1 = match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, chunk_id }) => { assert_eq!(epoch, init_epoch); assert_eq!(&chunk, &stream_chunk); + chunk_id } _ => unreachable!(), - } + }; + + let chunk_id1_2 = match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, chunk_id }) => { + assert_eq!(epoch, init_epoch); + assert_eq!(&chunk, &stream_chunk); + chunk_id + } + _ => unreachable!(), + }; match reader.next_item().await.unwrap() { (epoch, 
LogStoreReadItem::Barrier { is_checkpoint }) => { @@ -304,13 +377,14 @@ mod tests { _ => unreachable!(), } - match reader.next_item().await.unwrap() { - (epoch, LogStoreReadItem::StreamChunk(chunk)) => { + let chunk_id2_1 = match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, chunk_id }) => { assert_eq!(&chunk, &stream_chunk); assert_eq!(epoch, epoch1); + chunk_id } _ => unreachable!(), - } + }; match reader.next_item().await.unwrap() { (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { @@ -320,7 +394,30 @@ mod tests { _ => unreachable!(), } - reader.truncate().await.unwrap(); + reader + .truncate(TruncateOffset::Chunk { + epoch: init_epoch, + chunk_id: chunk_id1_2, + }) + .await + .unwrap(); + assert!(poll_fn(|cx| Poll::Ready(join_handle.poll_unpin(cx))) + .await + .is_pending()); + reader + .truncate(TruncateOffset::Chunk { + epoch: epoch1, + chunk_id: chunk_id2_1, + }) + .await + .unwrap(); + assert!(poll_fn(|cx| Poll::Ready(join_handle.poll_unpin(cx))) + .await + .is_pending()); + reader + .truncate(TruncateOffset::Barrier { epoch: epoch1 }) + .await + .unwrap(); join_handle.await.unwrap(); } } diff --git a/src/stream/src/common/log_store/kv_log_store/buffer.rs b/src/stream/src/common/log_store_impl/kv_log_store/buffer.rs similarity index 64% rename from src/stream/src/common/log_store/kv_log_store/buffer.rs rename to src/stream/src/common/log_store_impl/kv_log_store/buffer.rs index c433efa6781b4..ed1c495c81d75 100644 --- a/src/stream/src/common/log_store/kv_log_store/buffer.rs +++ b/src/stream/src/common/log_store_impl/kv_log_store/buffer.rs @@ -19,10 +19,10 @@ use std::sync::Arc; use parking_lot::{Mutex, MutexGuard}; use risingwave_common::array::StreamChunk; use risingwave_common::buffer::Bitmap; +use risingwave_connector::sink::log_store::{ChunkId, LogStoreResult, TruncateOffset}; use tokio::sync::{oneshot, Notify}; -use crate::common::log_store::kv_log_store::{ReaderTruncationOffsetType, SeqIdType}; -use 
crate::common::log_store::LogStoreResult; +use crate::common::log_store_impl::kv_log_store::{ReaderTruncationOffsetType, SeqIdType}; #[derive(Clone)] pub(crate) enum LogStoreBufferItem { @@ -31,12 +31,14 @@ pub(crate) enum LogStoreBufferItem { start_seq_id: SeqIdType, end_seq_id: SeqIdType, flushed: bool, + chunk_id: ChunkId, }, Flushed { vnode_bitmap: Bitmap, start_seq_id: SeqIdType, end_seq_id: SeqIdType, + chunk_id: ChunkId, }, Barrier { @@ -52,66 +54,58 @@ struct LogStoreBufferInner { unconsumed_queue: VecDeque<(u64, LogStoreBufferItem)>, /// Items already read by log reader by not truncated. Newer item at the front consumed_queue: VecDeque<(u64, LogStoreBufferItem)>, - stream_chunk_count: usize, - consumed_stream_chunk_count: usize, - max_stream_chunk_count: usize, + row_count: usize, + max_row_count: usize, - updated_truncation: Option, + truncation_list: VecDeque, + + next_chunk_id: ChunkId, } impl LogStoreBufferInner { fn can_add_stream_chunk(&self) -> bool { - self.stream_chunk_count < self.max_stream_chunk_count + self.row_count < self.max_row_count } fn add_item(&mut self, epoch: u64, item: LogStoreBufferItem) { if let LogStoreBufferItem::StreamChunk { .. } = item { unreachable!("StreamChunk should call try_add_item") } - assert!( - self.try_add_item(epoch, item).is_none(), - "call on item other than StreamChunk should always succeed" - ); + if let LogStoreBufferItem::Barrier { .. } = &item { + self.next_chunk_id = 0; + } + self.unconsumed_queue.push_front((epoch, item)); } - /// Try adding a `LogStoreBufferItem` to the buffer. If the stream chunk count exceeds the - /// maximum count, it will return the original stream chunk if we are adding a stream chunk. 
- fn try_add_item(&mut self, epoch: u64, item: LogStoreBufferItem) -> Option { - match item { - LogStoreBufferItem::StreamChunk { - chunk, - start_seq_id, - end_seq_id, - flushed, - } => { - if !self.can_add_stream_chunk() { - Some(chunk) - } else { - self.stream_chunk_count += 1; - self.unconsumed_queue.push_front(( - epoch, - LogStoreBufferItem::StreamChunk { - chunk, - start_seq_id, - end_seq_id, - flushed, - }, - )); - None - } - } - item => { - self.unconsumed_queue.push_front((epoch, item)); - None - } + pub(crate) fn try_add_stream_chunk( + &mut self, + epoch: u64, + chunk: StreamChunk, + start_seq_id: SeqIdType, + end_seq_id: SeqIdType, + ) -> Option { + if !self.can_add_stream_chunk() { + Some(chunk) + } else { + let chunk_id = self.next_chunk_id; + self.next_chunk_id += 1; + self.row_count += chunk.cardinality(); + self.unconsumed_queue.push_front(( + epoch, + LogStoreBufferItem::StreamChunk { + chunk, + start_seq_id, + end_seq_id, + flushed: false, + chunk_id, + }, + )); + None } } fn pop_item(&mut self) -> Option<(u64, LogStoreBufferItem)> { if let Some((epoch, item)) = self.unconsumed_queue.pop_back() { - if let LogStoreBufferItem::StreamChunk { .. 
} = &item { - self.consumed_stream_chunk_count += 1; - } self.consumed_queue.push_front((epoch, item.clone())); Some((epoch, item)) } else { @@ -148,12 +142,15 @@ impl LogStoreBufferInner { *prev_end_seq_id = end_seq_id; *vnode_bitmap |= new_vnode_bitmap; } else { + let chunk_id = self.next_chunk_id; + self.next_chunk_id += 1; self.add_item( epoch, LogStoreBufferItem::Flushed { start_seq_id, end_seq_id, vnode_bitmap: new_vnode_bitmap, + chunk_id, }, ); } @@ -221,15 +218,10 @@ impl LogStoreBufferSender { start_seq_id: SeqIdType, end_seq_id: SeqIdType, ) -> Option { - let ret = self.buffer.inner().try_add_item( - epoch, - LogStoreBufferItem::StreamChunk { - chunk, - start_seq_id, - end_seq_id, - flushed: false, - }, - ); + let ret = self + .buffer + .inner() + .try_add_stream_chunk(epoch, chunk, start_seq_id, end_seq_id); if ret.is_none() { // notify when successfully add self.update_notify.notify_waiters(); @@ -255,8 +247,13 @@ impl LogStoreBufferSender { self.update_notify.notify_waiters(); } - pub(crate) fn pop_truncation(&self) -> Option { - self.buffer.inner().updated_truncation.take() + pub(crate) fn pop_truncation(&self, curr_epoch: u64) -> Option { + let mut inner = self.buffer.inner(); + let mut ret = None; + while let Some((epoch, _)) = inner.truncation_list.front() && *epoch < curr_epoch { + ret = inner.truncation_list.pop_front(); + } + ret } pub(crate) fn flush_all_unflushed( @@ -275,6 +272,7 @@ impl LogStoreBufferSender { start_seq_id, end_seq_id, flushed, + .. 
} = item { if *flushed { @@ -321,34 +319,86 @@ impl LogStoreBufferReceiver { } } - pub(crate) fn truncate(&mut self) { + pub(crate) fn truncate(&mut self, offset: TruncateOffset) { let mut inner = self.buffer.inner(); - if let Some((epoch, item)) = inner.consumed_queue.front() { + let mut latest_offset: Option = None; + while let Some((epoch, item)) = inner.consumed_queue.back() { + let epoch = *epoch; match item { + LogStoreBufferItem::StreamChunk { + chunk_id, + flushed, + end_seq_id, + chunk, + .. + } => { + let chunk_offset = TruncateOffset::Chunk { + epoch, + chunk_id: *chunk_id, + }; + let flushed = *flushed; + let end_seq_id = *end_seq_id; + if chunk_offset <= offset { + inner.row_count -= chunk.cardinality(); + inner.consumed_queue.pop_back(); + if flushed { + latest_offset = Some((epoch, Some(end_seq_id))); + } + } else { + break; + } + } + LogStoreBufferItem::Flushed { + chunk_id, + end_seq_id, + .. + } => { + let end_seq_id = *end_seq_id; + let chunk_offset = TruncateOffset::Chunk { + epoch, + chunk_id: *chunk_id, + }; + if chunk_offset <= offset { + inner.consumed_queue.pop_back(); + latest_offset = Some((epoch, Some(end_seq_id))); + } else { + break; + } + } LogStoreBufferItem::Barrier { .. 
} => { - inner.updated_truncation = Some(*epoch); + let chunk_offset = TruncateOffset::Barrier { epoch }; + if chunk_offset <= offset { + inner.consumed_queue.pop_back(); + latest_offset = Some((epoch, None)); + } else { + break; + } } - _ => { - unreachable!("should only call truncate right after getting a barrier"); + LogStoreBufferItem::UpdateVnodes(_) => { + inner.consumed_queue.pop_back(); } } - inner.consumed_queue.clear(); - inner.stream_chunk_count -= inner.consumed_stream_chunk_count; - inner.consumed_stream_chunk_count = 0; + } + if let Some((epoch, seq_id)) = latest_offset { + if let Some((prev_epoch, ref mut prev_seq_id)) = inner.truncation_list.back_mut() && *prev_epoch == epoch { + *prev_seq_id = seq_id; + } else { + inner.truncation_list.push_back((epoch, seq_id)); + } } } } pub(crate) fn new_log_store_buffer( - max_stream_chunk_count: usize, + max_row_count: usize, ) -> (LogStoreBufferSender, LogStoreBufferReceiver) { let buffer = SharedMutex::new(LogStoreBufferInner { unconsumed_queue: VecDeque::new(), consumed_queue: VecDeque::new(), - stream_chunk_count: 0, - consumed_stream_chunk_count: 0, - max_stream_chunk_count, - updated_truncation: None, + row_count: 0, + max_row_count, + truncation_list: VecDeque::new(), + next_chunk_id: 0, }); let update_notify = Arc::new(Notify::new()); let (init_epoch_tx, init_epoch_rx) = oneshot::channel(); diff --git a/src/stream/src/common/log_store_impl/kv_log_store/mod.rs b/src/stream/src/common/log_store_impl/kv_log_store/mod.rs new file mode 100644 index 0000000000000..4256da4ca9325 --- /dev/null +++ b/src/stream/src/common/log_store_impl/kv_log_store/mod.rs @@ -0,0 +1,975 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use risingwave_common::buffer::Bitmap; +use risingwave_common::catalog::{TableId, TableOption}; +use risingwave_common::metrics::LabelGuardedIntCounter; +use risingwave_connector::sink::log_store::LogStoreFactory; +use risingwave_connector::sink::{SinkParam, SinkWriterParam}; +use risingwave_pb::catalog::Table; +use risingwave_storage::store::NewLocalOptions; +use risingwave_storage::StateStore; + +use crate::common::log_store_impl::kv_log_store::buffer::new_log_store_buffer; +use crate::common::log_store_impl::kv_log_store::reader::KvLogStoreReader; +use crate::common::log_store_impl::kv_log_store::serde::LogStoreRowSerde; +use crate::common::log_store_impl::kv_log_store::writer::KvLogStoreWriter; +use crate::executor::monitor::StreamingMetrics; + +mod buffer; +mod reader; +mod serde; +#[cfg(test)] +mod test_utils; +mod writer; + +type SeqIdType = i32; +type RowOpCodeType = i16; + +const FIRST_SEQ_ID: SeqIdType = 0; + +/// Readers truncate the offset at the granularity of seq id. +/// None `SeqIdType` means that the whole epoch is truncated. 
+type ReaderTruncationOffsetType = (u64, Option); + +#[derive(Clone)] +pub(crate) struct KvLogStoreReadMetrics { + pub storage_read_count: LabelGuardedIntCounter<4>, + pub storage_read_size: LabelGuardedIntCounter<4>, +} + +impl KvLogStoreReadMetrics { + #[cfg(test)] + pub(crate) fn for_test() -> Self { + Self { + storage_read_count: LabelGuardedIntCounter::test_int_counter(), + storage_read_size: LabelGuardedIntCounter::test_int_counter(), + } + } +} + +#[derive(Clone)] +pub(crate) struct KvLogStoreMetrics { + pub storage_write_count: LabelGuardedIntCounter<3>, + pub storage_write_size: LabelGuardedIntCounter<3>, + pub persistent_log_read_metrics: KvLogStoreReadMetrics, + pub flushed_buffer_read_metrics: KvLogStoreReadMetrics, +} + +impl KvLogStoreMetrics { + pub(crate) fn new( + metrics: &StreamingMetrics, + writer_param: &SinkWriterParam, + sink_param: &SinkParam, + connector: &'static str, + ) -> Self { + let executor_id = format!("{}", writer_param.executor_id); + let sink_id = format!("{}", sink_param.sink_id.sink_id); + let storage_write_size = metrics.kv_log_store_storage_write_size.with_label_values(&[ + executor_id.as_str(), + connector, + sink_id.as_str(), + ]); + let storage_write_count = metrics + .kv_log_store_storage_write_count + .with_label_values(&[executor_id.as_str(), connector, sink_id.as_str()]); + + const READ_PERSISTENT_LOG: &str = "persistent_log"; + const READ_FLUSHED_BUFFER: &str = "flushed_buffer"; + + let persistent_log_read_size = metrics.kv_log_store_storage_read_size.with_label_values(&[ + executor_id.as_str(), + connector, + sink_id.as_str(), + READ_PERSISTENT_LOG, + ]); + let persistent_log_read_count = + metrics.kv_log_store_storage_read_count.with_label_values(&[ + executor_id.as_str(), + connector, + sink_id.as_str(), + READ_PERSISTENT_LOG, + ]); + + let flushed_buffer_read_size = metrics.kv_log_store_storage_read_size.with_label_values(&[ + executor_id.as_str(), + connector, + sink_id.as_str(), + READ_FLUSHED_BUFFER, + ]); + 
let flushed_buffer_read_count = + metrics.kv_log_store_storage_read_count.with_label_values(&[ + executor_id.as_str(), + connector, + sink_id.as_str(), + READ_FLUSHED_BUFFER, + ]); + + Self { + storage_write_size, + storage_write_count, + persistent_log_read_metrics: KvLogStoreReadMetrics { + storage_read_size: persistent_log_read_size, + storage_read_count: persistent_log_read_count, + }, + flushed_buffer_read_metrics: KvLogStoreReadMetrics { + storage_read_count: flushed_buffer_read_count, + storage_read_size: flushed_buffer_read_size, + }, + } + } + + #[cfg(test)] + fn for_test() -> Self { + KvLogStoreMetrics { + storage_write_count: LabelGuardedIntCounter::test_int_counter(), + storage_write_size: LabelGuardedIntCounter::test_int_counter(), + persistent_log_read_metrics: KvLogStoreReadMetrics::for_test(), + flushed_buffer_read_metrics: KvLogStoreReadMetrics::for_test(), + } + } +} + +pub(crate) struct FlushInfo { + pub(crate) flush_size: usize, + pub(crate) flush_count: usize, +} + +impl FlushInfo { + pub(crate) fn new() -> Self { + FlushInfo { + flush_count: 0, + flush_size: 0, + } + } + + pub(crate) fn flush_one(&mut self, size: usize) { + self.flush_size += size; + self.flush_count += 1; + } + + pub(crate) fn report(self, metrics: &KvLogStoreMetrics) { + metrics.storage_write_count.inc_by(self.flush_count as _); + metrics.storage_write_size.inc_by(self.flush_size as _); + } +} + +pub struct KvLogStoreFactory { + state_store: S, + + table_catalog: Table, + + vnodes: Option>, + + max_row_count: usize, + + metrics: KvLogStoreMetrics, +} + +impl KvLogStoreFactory { + pub(crate) fn new( + state_store: S, + table_catalog: Table, + vnodes: Option>, + max_row_count: usize, + metrics: KvLogStoreMetrics, + ) -> Self { + Self { + state_store, + table_catalog, + vnodes, + max_row_count, + metrics, + } + } +} + +impl LogStoreFactory for KvLogStoreFactory { + type Reader = KvLogStoreReader; + type Writer = KvLogStoreWriter; + + async fn build(self) -> (Self::Reader, 
Self::Writer) { + let table_id = TableId::new(self.table_catalog.id); + let serde = LogStoreRowSerde::new(&self.table_catalog, self.vnodes); + let local_state_store = self + .state_store + .new_local(NewLocalOptions { + table_id: TableId { + table_id: self.table_catalog.id, + }, + is_consistent_op: false, + table_option: TableOption { + retention_seconds: None, + }, + is_replicated: false, + }) + .await; + + let (tx, rx) = new_log_store_buffer(self.max_row_count); + + let reader = KvLogStoreReader::new( + table_id, + self.state_store, + serde.clone(), + rx, + self.metrics.clone(), + ); + + let writer = KvLogStoreWriter::new(table_id, local_state_store, serde, tx, self.metrics); + + (reader, writer) + } +} + +#[cfg(test)] +mod tests { + use std::future::{poll_fn, Future}; + use std::pin::pin; + use std::sync::Arc; + use std::task::Poll; + + use risingwave_common::buffer::{Bitmap, BitmapBuilder}; + use risingwave_common::hash::VirtualNode; + use risingwave_common::util::epoch::EpochPair; + use risingwave_connector::sink::log_store::{ + LogReader, LogStoreFactory, LogStoreReadItem, LogWriter, TruncateOffset, + }; + use risingwave_hummock_sdk::HummockReadEpoch; + use risingwave_hummock_test::test_utils::prepare_hummock_test_env; + use risingwave_storage::store::SyncResult; + use risingwave_storage::StateStore; + + use crate::common::log_store_impl::kv_log_store::test_utils::{ + calculate_vnode_bitmap, check_rows_eq, check_stream_chunk_eq, + gen_multi_vnode_stream_chunks, gen_stream_chunk, gen_test_log_store_table, TEST_DATA_SIZE, + }; + use crate::common::log_store_impl::kv_log_store::{KvLogStoreFactory, KvLogStoreMetrics}; + + #[tokio::test] + async fn test_basic() { + for count in 0..20 { + test_basic_inner(count * TEST_DATA_SIZE).await + } + } + + async fn test_basic_inner(max_row_count: usize) { + let test_env = prepare_hummock_test_env().await; + + let table = gen_test_log_store_table(); + + test_env.register_table(table.clone()).await; + + let stream_chunk1 = 
gen_stream_chunk(0); + let stream_chunk2 = gen_stream_chunk(10); + let bitmap = calculate_vnode_bitmap(stream_chunk1.rows().chain(stream_chunk2.rows())); + + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(Arc::new(bitmap)), + max_row_count, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + + let epoch1 = test_env + .storage + .get_pinned_version() + .version() + .max_committed_epoch + + 1; + writer + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + writer.write_chunk(stream_chunk1.clone()).await.unwrap(); + let epoch2 = epoch1 + 1; + writer.flush_current_epoch(epoch2, false).await.unwrap(); + writer.write_chunk(stream_chunk2.clone()).await.unwrap(); + let epoch3 = epoch2 + 1; + writer.flush_current_epoch(epoch3, true).await.unwrap(); + + test_env.storage.seal_epoch(epoch1, false); + test_env.storage.seal_epoch(epoch2, true); + let sync_result: SyncResult = test_env.storage.sync(epoch2).await.unwrap(); + assert!(!sync_result.uncommitted_ssts.is_empty()); + + reader.init().await.unwrap(); + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. + }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint) + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&stream_chunk2, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + } + + #[tokio::test] + async fn test_recovery() { + for count in 0..20 { + test_recovery_inner(count * TEST_DATA_SIZE).await + } + } + + async fn test_recovery_inner(max_row_count: usize) { + let test_env = prepare_hummock_test_env().await; + + let table = gen_test_log_store_table(); + + test_env.register_table(table.clone()).await; + + let stream_chunk1 = gen_stream_chunk(0); + let stream_chunk2 = gen_stream_chunk(10); + let bitmap = calculate_vnode_bitmap(stream_chunk1.rows().chain(stream_chunk2.rows())); + let bitmap = Arc::new(bitmap); + + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(bitmap.clone()), + max_row_count, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + + let epoch1 = test_env + .storage + .get_pinned_version() + .version() + .max_committed_epoch + + 1; + writer + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + writer.write_chunk(stream_chunk1.clone()).await.unwrap(); + let epoch2 = epoch1 + 1; + writer.flush_current_epoch(epoch2, false).await.unwrap(); + writer.write_chunk(stream_chunk2.clone()).await.unwrap(); + let epoch3 = epoch2 + 1; + writer.flush_current_epoch(epoch3, true).await.unwrap(); + + test_env.storage.seal_epoch(epoch1, false); + + reader.init().await.unwrap(); + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint) + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. + }, + ) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&stream_chunk2, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + + test_env.commit_epoch(epoch2).await; + // The truncate does not work because it is after the sync + reader + .truncate(TruncateOffset::Barrier { epoch: epoch2 }) + .await + .unwrap(); + test_env + .storage + .try_wait_epoch(HummockReadEpoch::Committed(epoch2)) + .await + .unwrap(); + + // Recovery + test_env.storage.clear_shared_buffer().await.unwrap(); + + // Rebuild log reader and writer in recovery + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(bitmap), + max_row_count, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + writer + .init(EpochPair::new_test_epoch(epoch3)) + .await + .unwrap(); + reader.init().await.unwrap(); + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint) + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. + }, + ) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&stream_chunk2, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + } + + #[tokio::test] + async fn test_truncate() { + for count in 2..10 { + test_truncate_inner(count).await + } + } + + async fn test_truncate_inner(max_row_count: usize) { + let test_env = prepare_hummock_test_env().await; + + let table = gen_test_log_store_table(); + + test_env.register_table(table.clone()).await; + + let stream_chunk1_1 = gen_stream_chunk(0); + let stream_chunk1_2 = gen_stream_chunk(10); + let stream_chunk2 = gen_stream_chunk(20); + let stream_chunk3 = gen_stream_chunk(20); + let bitmap = calculate_vnode_bitmap( + stream_chunk1_1 + .rows() + .chain(stream_chunk1_2.rows()) + .chain(stream_chunk2.rows()) + .chain(stream_chunk3.rows()), + ); + let bitmap = Arc::new(bitmap); + + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(bitmap.clone()), + max_row_count, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + + let epoch1 = test_env + .storage + .get_pinned_version() + .version() + .max_committed_epoch + + 1; + writer + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + writer.write_chunk(stream_chunk1_1.clone()).await.unwrap(); + 
writer.write_chunk(stream_chunk1_2.clone()).await.unwrap(); + let epoch2 = epoch1 + 1; + writer.flush_current_epoch(epoch2, true).await.unwrap(); + writer.write_chunk(stream_chunk2.clone()).await.unwrap(); + + test_env.commit_epoch(epoch1).await; + + reader.init().await.unwrap(); + let chunk_id1 = match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + chunk_id, + }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1_1, &read_stream_chunk)); + chunk_id + } + _ => unreachable!(), + }; + let chunk_id2 = match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + chunk_id, + }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1_2, &read_stream_chunk)); + chunk_id + } + _ => unreachable!(), + }; + assert!(chunk_id2 > chunk_id1); + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&stream_chunk2, &read_stream_chunk)); + } + _ => unreachable!(), + } + + // The truncate should work because it is before the flush + reader + .truncate(TruncateOffset::Chunk { + epoch: epoch1, + chunk_id: chunk_id1, + }) + .await + .unwrap(); + let epoch3 = epoch2 + 1; + writer.flush_current_epoch(epoch3, true).await.unwrap(); + + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + + // Truncation on epoch1 should work because it is before this sync + test_env.commit_epoch(epoch2).await; + test_env + .storage + .try_wait_epoch(HummockReadEpoch::Committed(epoch2)) + .await + .unwrap(); + + // Recovery + test_env.storage.clear_shared_buffer().await.unwrap(); + + // Rebuild log reader and writer in recovery + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(bitmap), + max_row_count, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + + writer + .init(EpochPair::new_test_epoch(epoch3)) + .await + .unwrap(); + + writer.write_chunk(stream_chunk3.clone()).await.unwrap(); + + reader.init().await.unwrap(); + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. + }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1_2, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&stream_chunk2, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. + }, + ) => { + assert_eq!(epoch, epoch3); + assert!(check_stream_chunk_eq(&stream_chunk3, &read_stream_chunk)); + } + _ => unreachable!(), + } + } + + #[tokio::test] + async fn test_update_vnode_recover() { + let test_env = prepare_hummock_test_env().await; + + let table = gen_test_log_store_table(); + + test_env.register_table(table.clone()).await; + + fn build_bitmap(indexes: impl Iterator) -> Arc { + let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); + for i in indexes { + builder.set(i, true); + } + Arc::new(builder.finish()) + } + + let vnodes1 = build_bitmap((0..VirtualNode::COUNT).filter(|i| i % 2 == 0)); + let vnodes2 = build_bitmap((0..VirtualNode::COUNT).filter(|i| i % 2 == 1)); + + let factory1 = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(vnodes1), + 10 * TEST_DATA_SIZE, + KvLogStoreMetrics::for_test(), + ); + let factory2 = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(vnodes2), + 10 * TEST_DATA_SIZE, + KvLogStoreMetrics::for_test(), + ); + let (mut reader1, mut writer1) = factory1.build().await; + let (mut reader2, mut writer2) = factory2.build().await; + + let epoch1 = test_env + .storage + .get_pinned_version() + .version() + .max_committed_epoch + + 1; + writer1 + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + writer2 + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + reader1.init().await.unwrap(); + reader2.init().await.unwrap(); + let [chunk1_1, chunk1_2] = 
gen_multi_vnode_stream_chunks::<2>(0, 100); + writer1.write_chunk(chunk1_1.clone()).await.unwrap(); + writer2.write_chunk(chunk1_2.clone()).await.unwrap(); + let epoch2 = epoch1 + 1; + writer1.flush_current_epoch(epoch2, false).await.unwrap(); + writer2.flush_current_epoch(epoch2, false).await.unwrap(); + let [chunk2_1, chunk2_2] = gen_multi_vnode_stream_chunks::<2>(200, 100); + writer1.write_chunk(chunk2_1.clone()).await.unwrap(); + writer2.write_chunk(chunk2_2.clone()).await.unwrap(); + + match reader1.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. }) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&chunk1_1, &chunk)); + } + _ => unreachable!(), + }; + match reader1.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint); + } + _ => unreachable!(), + } + + match reader2.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. }) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&chunk1_2, &chunk)); + } + _ => unreachable!(), + } + match reader2.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint); + } + _ => unreachable!(), + } + + // Only reader1 will truncate + reader1 + .truncate(TruncateOffset::Barrier { epoch: epoch1 }) + .await + .unwrap(); + + match reader1.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. }) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&chunk2_1, &chunk)); + } + _ => unreachable!(), + } + match reader2.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. 
}) => { + assert_eq!(epoch, epoch2); + assert!(check_stream_chunk_eq(&chunk2_2, &chunk)); + } + _ => unreachable!(), + } + + let epoch3 = epoch2 + 1; + writer1.flush_current_epoch(epoch3, true).await.unwrap(); + writer2.flush_current_epoch(epoch3, true).await.unwrap(); + + match reader1.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint); + } + _ => unreachable!(), + } + match reader2.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint); + } + _ => unreachable!(), + } + + // Truncation of reader1 on epoch1 should work because it is before this sync + test_env.storage.seal_epoch(epoch1, false); + test_env.commit_epoch(epoch2).await; + test_env + .storage + .try_wait_epoch(HummockReadEpoch::Committed(epoch2)) + .await + .unwrap(); + + // Recovery + test_env.storage.clear_shared_buffer().await.unwrap(); + + let vnodes = build_bitmap(0..VirtualNode::COUNT); + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(vnodes), + 10 * TEST_DATA_SIZE, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + writer.init(EpochPair::new(epoch3, epoch2)).await.unwrap(); + reader.init().await.unwrap(); + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. }) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&chunk1_2, &chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(!is_checkpoint); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::StreamChunk { chunk, .. 
}) => { + assert_eq!(epoch, epoch2); + assert!(check_rows_eq( + chunk2_1.rows().chain(chunk2_2.rows()), + chunk.rows() + )); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch2); + assert!(is_checkpoint); + } + _ => unreachable!(), + } + } + + #[tokio::test] + async fn test_cancellation_safe() { + let test_env = prepare_hummock_test_env().await; + + let table = gen_test_log_store_table(); + + test_env.register_table(table.clone()).await; + + let stream_chunk1 = gen_stream_chunk(0); + let stream_chunk2 = gen_stream_chunk(10); + let bitmap = calculate_vnode_bitmap(stream_chunk1.rows().chain(stream_chunk2.rows())); + + let factory = KvLogStoreFactory::new( + test_env.storage.clone(), + table.clone(), + Some(Arc::new(bitmap)), + 0, + KvLogStoreMetrics::for_test(), + ); + let (mut reader, mut writer) = factory.build().await; + + let epoch1 = test_env + .storage + .get_pinned_version() + .version() + .max_committed_epoch + + 1; + writer + .init(EpochPair::new_test_epoch(epoch1)) + .await + .unwrap(); + writer.write_chunk(stream_chunk1.clone()).await.unwrap(); + let epoch2 = epoch1 + 1; + writer.flush_current_epoch(epoch2, true).await.unwrap(); + + reader.init().await.unwrap(); + + { + let mut future = pin!(reader.next_item()); + assert!(poll_fn(|cx| Poll::Ready(future.as_mut().poll(cx))) + .await + .is_pending()); + } + + match reader.next_item().await.unwrap() { + ( + epoch, + LogStoreReadItem::StreamChunk { + chunk: read_stream_chunk, + .. 
+ }, + ) => { + assert_eq!(epoch, epoch1); + assert!(check_stream_chunk_eq(&stream_chunk1, &read_stream_chunk)); + } + _ => unreachable!(), + } + match reader.next_item().await.unwrap() { + (epoch, LogStoreReadItem::Barrier { is_checkpoint }) => { + assert_eq!(epoch, epoch1); + assert!(is_checkpoint) + } + _ => unreachable!(), + } + } +} diff --git a/src/stream/src/common/log_store_impl/kv_log_store/reader.rs b/src/stream/src/common/log_store_impl/kv_log_store/reader.rs new file mode 100644 index 0000000000000..cb7fc402168d4 --- /dev/null +++ b/src/stream/src/common/log_store_impl/kv_log_store/reader.rs @@ -0,0 +1,343 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::ops::Bound::{Excluded, Included}; +use std::pin::Pin; + +use anyhow::anyhow; +use bytes::Bytes; +use futures::future::{try_join_all, BoxFuture}; +use futures::stream::select_all; +use futures::FutureExt; +use risingwave_common::array::StreamChunk; +use risingwave_common::cache::CachePriority; +use risingwave_common::catalog::TableId; +use risingwave_common::hash::VnodeBitmapExt; +use risingwave_connector::sink::log_store::{ + ChunkId, LogReader, LogStoreReadItem, LogStoreResult, TruncateOffset, +}; +use risingwave_hummock_sdk::key::TableKey; +use risingwave_storage::hummock::CachePolicy; +use risingwave_storage::store::{PrefetchOptions, ReadOptions}; +use risingwave_storage::StateStore; +use tokio_stream::StreamExt; + +use crate::common::log_store_impl::kv_log_store::buffer::{ + LogStoreBufferItem, LogStoreBufferReceiver, +}; +use crate::common::log_store_impl::kv_log_store::serde::{ + merge_log_store_item_stream, KvLogStoreItem, LogStoreItemMergeStream, LogStoreRowSerde, +}; +use crate::common::log_store_impl::kv_log_store::KvLogStoreMetrics; + +pub struct KvLogStoreReader { + table_id: TableId, + + state_store: S, + + serde: LogStoreRowSerde, + + rx: LogStoreBufferReceiver, + + /// The first epoch that newly written by the log writer + first_write_epoch: Option, + + /// `Some` means consuming historical log data + state_store_stream: Option>>>, + + /// Store the future that attempts to read a flushed stream chunk. + /// This is for cancellation safety. Since it is possible that the future of `next_item` + /// gets dropped after it takes an flushed item out from the buffer, but before it successfully + /// read the stream chunk out from the storage. Therefore we store the future so that it can continue + /// reading the stream chunk after the next `next_item` is called. 
+ read_flushed_chunk_future: + Option>>, + + latest_offset: TruncateOffset, + + truncate_offset: TruncateOffset, + + metrics: KvLogStoreMetrics, +} + +impl KvLogStoreReader { + pub(crate) fn new( + table_id: TableId, + state_store: S, + serde: LogStoreRowSerde, + rx: LogStoreBufferReceiver, + metrics: KvLogStoreMetrics, + ) -> Self { + Self { + table_id, + state_store, + serde, + rx, + read_flushed_chunk_future: None, + first_write_epoch: None, + state_store_stream: None, + latest_offset: TruncateOffset::Barrier { epoch: 0 }, + truncate_offset: TruncateOffset::Barrier { epoch: 0 }, + metrics, + } + } + + async fn may_continue_read_flushed_chunk( + &mut self, + ) -> LogStoreResult> { + if let Some(future) = self.read_flushed_chunk_future.as_mut() { + let result = future.await; + self.read_flushed_chunk_future + .take() + .expect("future not None"); + Ok(Some(result?)) + } else { + Ok(None) + } + } +} + +impl LogReader for KvLogStoreReader { + async fn init(&mut self) -> LogStoreResult<()> { + let first_write_epoch = self.rx.init().await; + let streams = try_join_all(self.serde.vnodes().iter_vnodes().map(|vnode| { + let range_start = TableKey(Bytes::from(Vec::from(vnode.to_be_bytes()))); + let range_end = self.serde.serialize_epoch(vnode, first_write_epoch); + let table_id = self.table_id; + let state_store = self.state_store.clone(); + async move { + state_store + .iter( + (Included(range_start), Excluded(range_end)), + u64::MAX, + ReadOptions { + prefetch_options: PrefetchOptions::new_for_exhaust_iter(), + cache_policy: CachePolicy::Fill(CachePriority::Low), + table_id, + ..Default::default() + }, + ) + .await + } + })) + .await?; + + assert!( + self.first_write_epoch.replace(first_write_epoch).is_none(), + "should not init twice" + ); + // TODO: set chunk size by config + self.state_store_stream = Some(Box::pin(merge_log_store_item_stream( + streams, + self.serde.clone(), + 1024, + self.metrics.persistent_log_read_metrics.clone(), + ))); + Ok(()) + } + + async fn 
next_item(&mut self) -> LogStoreResult<(u64, LogStoreReadItem)> { + if let Some(state_store_stream) = &mut self.state_store_stream { + match state_store_stream.try_next().await? { + Some((epoch, item)) => { + self.latest_offset.check_next_item_epoch(epoch)?; + let item = match item { + KvLogStoreItem::StreamChunk(chunk) => { + let chunk_id = self.latest_offset.next_chunk_id(); + self.latest_offset = TruncateOffset::Chunk { epoch, chunk_id }; + LogStoreReadItem::StreamChunk { chunk, chunk_id } + } + KvLogStoreItem::Barrier { is_checkpoint } => { + self.latest_offset = TruncateOffset::Barrier { epoch }; + LogStoreReadItem::Barrier { is_checkpoint } + } + }; + return Ok((epoch, item)); + } + None => { + self.state_store_stream = None; + } + } + } + + // It is possible that the future gets dropped after it pops a flushed + // item but before it reads a stream chunk. Therefore, we may continue + // driving the future to continue reading the stream chunk. + if let Some((chunk_id, chunk, item_epoch)) = self.may_continue_read_flushed_chunk().await? { + let offset = TruncateOffset::Chunk { + epoch: item_epoch, + chunk_id, + }; + assert!(offset > self.latest_offset); + self.latest_offset = offset; + return Ok(( + item_epoch, + LogStoreReadItem::StreamChunk { chunk, chunk_id }, + )); + } + + // Now the historical state store has been consumed. + let (item_epoch, item) = self.rx.next_item().await; + self.latest_offset.check_next_item_epoch(item_epoch)?; + Ok(match item { + LogStoreBufferItem::StreamChunk { + chunk, chunk_id, .. 
+ } => { + let offset = TruncateOffset::Chunk { + epoch: item_epoch, + chunk_id, + }; + assert!(offset > self.latest_offset); + self.latest_offset = offset; + ( + item_epoch, + LogStoreReadItem::StreamChunk { chunk, chunk_id }, + ) + } + LogStoreBufferItem::Flushed { + vnode_bitmap, + start_seq_id, + end_seq_id, + chunk_id, + } => { + let read_flushed_chunk_future = { + let serde = self.serde.clone(); + let state_store = self.state_store.clone(); + let table_id = self.table_id; + let read_metrics = self.metrics.flushed_buffer_read_metrics.clone(); + async move { + let streams = try_join_all(vnode_bitmap.iter_vnodes().map(|vnode| { + let range_start = + serde.serialize_log_store_pk(vnode, item_epoch, Some(start_seq_id)); + let range_end = + serde.serialize_log_store_pk(vnode, item_epoch, Some(end_seq_id)); + let state_store = &state_store; + + // Use u64::MAX here because the epoch to consume may be below the safe + // epoch + async move { + Ok::<_, anyhow::Error>(Box::pin( + state_store + .iter( + (Included(range_start), Included(range_end)), + u64::MAX, + ReadOptions { + prefetch_options: + PrefetchOptions::new_for_exhaust_iter(), + cache_policy: CachePolicy::Fill(CachePriority::Low), + table_id, + ..Default::default() + }, + ) + .await?, + )) + } + })) + .await?; + let combined_stream = select_all(streams); + + let chunk = serde + .deserialize_stream_chunk( + combined_stream, + start_seq_id, + end_seq_id, + item_epoch, + &read_metrics, + ) + .await?; + + Ok((chunk_id, chunk, item_epoch)) + } + .boxed() + }; + + // Store the future in case that in the subsequent pending await point, + // the future is cancelled, and we lose an flushed item. + assert!(self + .read_flushed_chunk_future + .replace(read_flushed_chunk_future) + .is_none()); + + // for cancellation test + #[cfg(test)] + { + use std::time::Duration; + + use tokio::time::sleep; + sleep(Duration::from_secs(1)).await; + } + + let (_, chunk, _) = self + .may_continue_read_flushed_chunk() + .await? 
+ .expect("future just insert. unlikely to be none"); + + let offset = TruncateOffset::Chunk { + epoch: item_epoch, + chunk_id, + }; + assert!(offset > self.latest_offset); + self.latest_offset = offset; + ( + item_epoch, + LogStoreReadItem::StreamChunk { chunk, chunk_id }, + ) + } + LogStoreBufferItem::Barrier { + is_checkpoint, + next_epoch, + } => { + assert!( + item_epoch < next_epoch, + "next epoch {} should be greater than current epoch {}", + next_epoch, + item_epoch + ); + self.latest_offset = TruncateOffset::Barrier { epoch: item_epoch }; + (item_epoch, LogStoreReadItem::Barrier { is_checkpoint }) + } + LogStoreBufferItem::UpdateVnodes(bitmap) => { + self.serde.update_vnode_bitmap(bitmap.clone()); + (item_epoch, LogStoreReadItem::UpdateVnodeBitmap(bitmap)) + } + }) + } + + async fn truncate(&mut self, offset: TruncateOffset) -> LogStoreResult<()> { + if offset > self.latest_offset { + return Err(anyhow!( + "truncate at a later offset {:?} than the current latest offset {:?}", + offset, + self.latest_offset + )); + } + if offset <= self.truncate_offset { + return Err(anyhow!( + "truncate offset {:?} earlier than prev truncate offset {:?}", + offset, + self.truncate_offset + )); + } + if offset.epoch() >= self.first_write_epoch.expect("should have init") { + self.rx.truncate(offset); + } else { + // For historical data, no need to truncate at seq id level. Only truncate at barrier. + if let TruncateOffset::Barrier { .. 
} = &offset { + self.rx.truncate(offset); + } + } + self.truncate_offset = offset; + Ok(()) + } +} diff --git a/src/stream/src/common/log_store/kv_log_store/serde.rs b/src/stream/src/common/log_store_impl/kv_log_store/serde.rs similarity index 63% rename from src/stream/src/common/log_store/kv_log_store/serde.rs rename to src/stream/src/common/log_store_impl/kv_log_store/serde.rs index 15825e9f275e6..d3102aa936fad 100644 --- a/src/stream/src/common/log_store/kv_log_store/serde.rs +++ b/src/stream/src/common/log_store_impl/kv_log_store/serde.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use anyhow::anyhow; use bytes::Bytes; -use futures::stream::{FuturesUnordered, StreamFuture}; +use futures::stream::{FuturesUnordered, Peekable, StreamFuture}; use futures::{pin_mut, Stream, StreamExt, TryStreamExt}; use futures_async_stream::try_stream; use itertools::Itertools; @@ -29,6 +29,7 @@ use risingwave_common::constants::log_store::{ EPOCH_COLUMN_INDEX, EPOCH_COLUMN_TYPE, KV_LOG_STORE_PREDEFINED_COLUMNS, PK_TYPES, ROW_OP_COLUMN_INDEX, SEQ_ID_COLUMN_INDEX, }; +use risingwave_common::estimate_size::EstimateSize; use risingwave_common::hash::VirtualNode; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::types::{DataType, ScalarImpl}; @@ -38,17 +39,18 @@ use risingwave_common::util::sort_util::OrderType; use risingwave_common::util::value_encoding::{ BasicSerde, ValueRowDeserializer, ValueRowSerializer, }; -use risingwave_hummock_sdk::key::next_key; +use risingwave_connector::sink::log_store::LogStoreResult; +use risingwave_hummock_sdk::key::{next_key, TableKey}; use risingwave_pb::catalog::Table; +use risingwave_storage::error::StorageError; use risingwave_storage::row_serde::row_serde_util::serialize_pk_with_vnode; use risingwave_storage::row_serde::value_serde::ValueRowSerdeNew; use risingwave_storage::store::StateStoreReadIterStream; use risingwave_storage::table::{compute_vnode, Distribution}; -use crate::common::log_store::kv_log_store::{ - 
ReaderTruncationOffsetType, RowOpCodeType, SeqIdType, +use crate::common::log_store_impl::kv_log_store::{ + KvLogStoreReadMetrics, ReaderTruncationOffsetType, RowOpCodeType, SeqIdType, }; -use crate::common::log_store::{LogStoreError, LogStoreReadItem, LogStoreResult}; const INSERT_OP_CODE: RowOpCodeType = 1; const DELETE_OP_CODE: RowOpCodeType = 2; @@ -57,6 +59,32 @@ const UPDATE_DELETE_OP_CODE: RowOpCodeType = 4; const BARRIER_OP_CODE: RowOpCodeType = 5; const CHECKPOINT_BARRIER_OP_CODE: RowOpCodeType = 6; +struct ReadInfo { + read_size: usize, + read_count: usize, +} + +impl ReadInfo { + fn new() -> Self { + Self { + read_count: 0, + read_size: 0, + } + } + + fn read_one_row(&mut self, size: usize) { + self.read_count += 1; + self.read_size += size; + } + + fn report(&mut self, metrics: &KvLogStoreReadMetrics) { + metrics.storage_read_size.inc_by(self.read_size as _); + metrics.storage_read_count.inc_by(self.read_count as _); + self.read_size = 0; + self.read_count = 0; + } +} + #[derive(Eq, PartialEq, Debug)] enum LogStoreRowOp { Row { op: Op, row: OwnedRow }, @@ -174,7 +202,7 @@ impl LogStoreRowSerde { seq_id: SeqIdType, op: Op, row: impl Row, - ) -> (VirtualNode, Bytes, Bytes) { + ) -> (VirtualNode, TableKey, Bytes) { let pk = [ Some(ScalarImpl::Int64(Self::encode_epoch(epoch))), Some(ScalarImpl::Int32(seq_id)), @@ -200,7 +228,7 @@ impl LogStoreRowSerde { epoch: u64, vnode: VirtualNode, is_checkpoint: bool, - ) -> (Bytes, Bytes) { + ) -> (TableKey, Bytes) { let pk = [Some(ScalarImpl::Int64(Self::encode_epoch(epoch))), None]; let op_code = if is_checkpoint { @@ -218,7 +246,7 @@ impl LogStoreRowSerde { (key_bytes, value_bytes) } - pub(crate) fn serialize_epoch(&self, vnode: VirtualNode, epoch: u64) -> Bytes { + pub(crate) fn serialize_epoch(&self, vnode: VirtualNode, epoch: u64) -> TableKey { serialize_pk_with_vnode( [Some(ScalarImpl::Int64(Self::encode_epoch(epoch)))], &self.epoch_serde, @@ -230,12 +258,12 @@ impl LogStoreRowSerde { &self, vnode: VirtualNode, 
epoch: u64, - seq_id: SeqIdType, - ) -> Bytes { + seq_id: Option, + ) -> TableKey { serialize_pk_with_vnode( [ Some(ScalarImpl::Int64(Self::encode_epoch(epoch))), - Some(ScalarImpl::Int32(seq_id)), + seq_id.map(ScalarImpl::Int32), ], &self.pk_serde, vnode, @@ -247,7 +275,8 @@ impl LogStoreRowSerde { vnode: VirtualNode, offset: ReaderTruncationOffsetType, ) -> Bytes { - let curr_offset = self.serialize_epoch(vnode, offset); + let (epoch, seq_id) = offset; + let curr_offset = self.serialize_log_store_pk(vnode, epoch, seq_id); let ret = Bytes::from(next_key(&curr_offset)); assert!(!ret.is_empty()); ret @@ -302,46 +331,49 @@ impl LogStoreRowSerde { start_seq_id: SeqIdType, end_seq_id: SeqIdType, expected_epoch: u64, + metrics: &KvLogStoreReadMetrics, ) -> LogStoreResult { pin_mut!(stream); let size_bound = (end_seq_id - start_seq_id + 1) as usize; let mut data_chunk_builder = DataChunkBuilder::new(self.payload_schema.clone(), size_bound + 1); let mut ops = Vec::with_capacity(size_bound); - while let Some((_, value)) = stream.try_next().await? { + let mut read_info = ReadInfo::new(); + while let Some((key, value)) = stream.try_next().await? { + read_info + .read_one_row(key.user_key.table_key.estimated_size() + value.estimated_size()); match self.deserialize(value)? { (epoch, LogStoreRowOp::Row { op, row }) => { if epoch != expected_epoch { - return Err(LogStoreError::Internal(anyhow!( + return Err(anyhow!( "decoded epoch {} not match expected epoch {}", epoch, expected_epoch - ))); + )); } ops.push(op); if ops.len() > size_bound { - return Err(LogStoreError::Internal(anyhow!( + return Err(anyhow!( "row count {} exceed size bound {}", ops.len(), size_bound - ))); + )); } assert!(data_chunk_builder.append_one_row(row).is_none()); } (_, LogStoreRowOp::Barrier { .. 
}) => { - return Err(LogStoreError::Internal(anyhow!( - "should not get barrier when decoding stream chunk" - ))); + return Err(anyhow!("should not get barrier when decoding stream chunk")); } } } if ops.is_empty() { - return Err(LogStoreError::Internal(anyhow!( + return Err(anyhow!( "should not get empty row when decoding stream chunk. start seq id: {}, end seq id {}", start_seq_id, - end_seq_id)) + end_seq_id) ); } + read_info.report(metrics); Ok(StreamChunk::from_parts( ops, data_chunk_builder @@ -367,59 +399,46 @@ enum StreamState { BarrierEmitted { prev_epoch: u64 }, } +pub(crate) enum KvLogStoreItem { + StreamChunk(StreamChunk), + Barrier { is_checkpoint: bool }, +} + +type BoxPeekableLogStoreItemStream = Pin>>>; + struct LogStoreRowOpStream { serde: LogStoreRowSerde, /// Streams that have not reached a barrier - row_streams: FuturesUnordered>>>, + row_streams: FuturesUnordered>>, /// Streams that have reached a barrier - barrier_streams: Vec>>, + barrier_streams: Vec>, + + not_started_streams: Vec<(u64, BoxPeekableLogStoreItemStream)>, stream_state: StreamState, + + metrics: KvLogStoreReadMetrics, } impl LogStoreRowOpStream { - pub(crate) fn new(streams: Vec, serde: LogStoreRowSerde) -> Self { + pub(crate) fn new( + streams: Vec, + serde: LogStoreRowSerde, + metrics: KvLogStoreReadMetrics, + ) -> Self { assert!(!streams.is_empty()); Self { - serde, - barrier_streams: Vec::with_capacity(streams.len()), - row_streams: streams + serde: serde.clone(), + barrier_streams: streams .into_iter() - .map(|s| Box::pin(s).into_future()) + .map(|s| Box::pin(deserialize_stream(s, serde.clone()).peekable())) .collect(), + row_streams: FuturesUnordered::new(), + not_started_streams: Vec::new(), stream_state: StreamState::Uninitialized, - } - } - - fn check_epoch(&self, epoch: u64) -> LogStoreResult<()> { - match &self.stream_state { - StreamState::Uninitialized => Ok(()), - StreamState::AllConsumingRow { curr_epoch } - | StreamState::BarrierAligning { curr_epoch, .. 
} => { - if *curr_epoch != epoch { - Err(LogStoreError::Internal(anyhow!( - "epoch {} does not match with current epoch {}", - epoch, - curr_epoch - ))) - } else { - Ok(()) - } - } - - StreamState::BarrierEmitted { prev_epoch } => { - if *prev_epoch >= epoch { - Err(LogStoreError::Internal(anyhow!( - "epoch {} should be greater than prev epoch {}", - epoch, - prev_epoch - ))) - } else { - Ok(()) - } - } + metrics, } } @@ -432,86 +451,213 @@ impl LogStoreRowOpStream { if is_checkpoint == *curr_is_checkpoint { Ok(()) } else { - Err(LogStoreError::Internal(anyhow!( + Err(anyhow!( "current aligning barrier is_checkpoint: {}, current barrier is_checkpoint {}", curr_is_checkpoint, is_checkpoint - ))) + )) } } else { Ok(()) } } - #[try_stream(ok = (u64, LogStoreReadItem), error = LogStoreError)] - async fn into_log_store_item_stream(self, chunk_size: usize) { + #[try_stream(ok = (u64, KvLogStoreItem), error = anyhow::Error)] + async fn into_log_store_item_stream(mut self, chunk_size: usize) { let mut ops = Vec::with_capacity(chunk_size); let mut data_chunk_builder = DataChunkBuilder::new(self.serde.payload_schema.clone(), chunk_size); + if !self.init().await? { + // no data in all stream + return Ok(()); + } + let this = self; pin_mut!(this); - while let Some((epoch, row_op)) = this.next_op().await? { + while let Some((epoch, row_op, row_read_size)) = this.next_op().await? 
{ + let mut read_info = ReadInfo::new(); + read_info.read_one_row(row_read_size); match row_op { LogStoreRowOp::Row { op, row } => { ops.push(op); if let Some(chunk) = data_chunk_builder.append_one_row(row) { let ops = replace(&mut ops, Vec::with_capacity(chunk_size)); + read_info.report(&this.metrics); yield ( epoch, - LogStoreReadItem::StreamChunk(StreamChunk::from_parts(ops, chunk)), + KvLogStoreItem::StreamChunk(StreamChunk::from_parts(ops, chunk)), ); } } LogStoreRowOp::Barrier { is_checkpoint } => { + read_info.report(&this.metrics); if let Some(chunk) = data_chunk_builder.consume_all() { let ops = replace(&mut ops, Vec::with_capacity(chunk_size)); yield ( epoch, - LogStoreReadItem::StreamChunk(StreamChunk::from_parts(ops, chunk)), + KvLogStoreItem::StreamChunk(StreamChunk::from_parts(ops, chunk)), ); } - yield (epoch, LogStoreReadItem::Barrier { is_checkpoint }) + yield (epoch, KvLogStoreItem::Barrier { is_checkpoint }) } } } } } -pub(crate) type LogStoreItemStream = impl Stream>; -pub(crate) fn new_log_store_item_stream( +pub(crate) type LogStoreItemMergeStream = + impl Stream>; +pub(crate) fn merge_log_store_item_stream( streams: Vec, serde: LogStoreRowSerde, chunk_size: usize, + metrics: KvLogStoreReadMetrics, +) -> LogStoreItemMergeStream { + LogStoreRowOpStream::new(streams, serde, metrics).into_log_store_item_stream(chunk_size) +} + +type LogStoreItemStream = + impl Stream> + Send; +fn deserialize_stream( + stream: S, + serde: LogStoreRowSerde, ) -> LogStoreItemStream { - LogStoreRowOpStream::new(streams, serde).into_log_store_item_stream(chunk_size) + stream.map( + move |result: Result<_, StorageError>| -> LogStoreResult<(u64, LogStoreRowOp, usize)> { + match result { + Ok((key, value)) => { + let read_size = + key.user_key.table_key.estimated_size() + value.estimated_size(); + let (epoch, op) = serde.deserialize(value)?; + Ok((epoch, op, read_size)) + } + Err(e) => Err(e.into()), + } + }, + ) } impl LogStoreRowOpStream { - async fn next_op(&mut self) 
-> LogStoreResult> { - assert!(!self.row_streams.is_empty()); + // Return Ok(false) means all streams have reach the end. + async fn init(&mut self) -> LogStoreResult { + match &self.stream_state { + StreamState::Uninitialized => {} + _ => unreachable!("cannot call init for twice"), + }; + + // before init, all streams are in `barrier_streams` + assert!( + self.row_streams.is_empty(), + "when uninitialized, row_streams should be empty" + ); + assert!(self.not_started_streams.is_empty()); + assert!(!self.barrier_streams.is_empty()); + + for mut stream in self.barrier_streams.drain(..) { + match stream.as_mut().peek().await { + Some(Ok((epoch, _, _))) => { + self.not_started_streams.push((*epoch, stream)); + } + Some(Err(_)) => match stream.next().await { + Some(Err(e)) => { + return Err(e); + } + _ => unreachable!("on peek we have checked it's Some(Err(_))"), + }, + None => { + continue; + } + } + } + + if self.not_started_streams.is_empty() { + // No stream has data + return Ok(false); + } + + // sorted by epoch descending. Earlier epoch at the end + self.not_started_streams + .sort_by_key(|(epoch, _)| u64::MAX - *epoch); + + let (epoch, stream) = self + .not_started_streams + .pop() + .expect("have check non-empty"); + self.row_streams.push(stream.into_future()); + while let Some((stream_epoch, _)) = self.not_started_streams.last() && *stream_epoch == epoch { + let (_, stream) = self.not_started_streams.pop().expect("should not be empty"); + self.row_streams.push(stream.into_future()); + } + self.stream_state = StreamState::AllConsumingRow { curr_epoch: epoch }; + Ok(true) + } + + fn may_init_epoch(&mut self, epoch: u64) -> LogStoreResult<()> { + let prev_epoch = match &self.stream_state { + StreamState::Uninitialized => unreachable!("should have init"), + StreamState::BarrierEmitted { prev_epoch } => *prev_epoch, + StreamState::AllConsumingRow { curr_epoch } + | StreamState::BarrierAligning { curr_epoch, .. 
} => { + return if *curr_epoch != epoch { + Err(anyhow!( + "epoch {} does not match with current epoch {}", + epoch, + curr_epoch + )) + } else { + Ok(()) + }; + } + }; + + if prev_epoch >= epoch { + return Err(anyhow!( + "epoch {} should be greater than prev epoch {}", + epoch, + prev_epoch + )); + } + + while let Some((stream_epoch, _)) = self.not_started_streams.last() { + if *stream_epoch > epoch { + // Current epoch has not reached the first epoch of + // the stream. Later streams must also have greater epoch, so break here. + break; + } + if *stream_epoch < epoch { + return Err(anyhow!( + "current epoch {} has exceed epoch {} of stream not started", + epoch, + stream_epoch + )); + } + let (_, stream) = self.not_started_streams.pop().expect("should not be empty"); + self.row_streams.push(stream.into_future()); + } + + self.stream_state = StreamState::AllConsumingRow { curr_epoch: epoch }; + Ok(()) + } + + async fn next_op(&mut self) -> LogStoreResult> { while let (Some(result), stream) = self .row_streams .next() .await .expect("row stream should not be empty when polled") { - let (_key, value): (_, Bytes) = result?; - let (decoded_epoch, op) = self.serde.deserialize(value)?; - self.check_epoch(decoded_epoch)?; + let (decoded_epoch, op, read_size) = result?; + self.may_init_epoch(decoded_epoch)?; match op { LogStoreRowOp::Row { op, row } => { - match &self.stream_state { - StreamState::Uninitialized | StreamState::BarrierEmitted { .. 
} => { - self.stream_state = StreamState::AllConsumingRow { - curr_epoch: decoded_epoch, - } - } - _ => {} - }; self.row_streams.push(stream.into_future()); - return Ok(Some((decoded_epoch, LogStoreRowOp::Row { op, row }))); + return Ok(Some(( + decoded_epoch, + LogStoreRowOp::Row { op, row }, + read_size, + ))); } LogStoreRowOp::Barrier { is_checkpoint } => { self.check_is_checkpoint(is_checkpoint)?; @@ -528,6 +674,7 @@ impl LogStoreRowOpStream { return Ok(Some(( decoded_epoch, LogStoreRowOp::Barrier { is_checkpoint }, + read_size, ))); } else { self.stream_state = StreamState::BarrierAligning { @@ -541,11 +688,11 @@ impl LogStoreRowOpStream { } // End of stream match &self.stream_state { - StreamState::BarrierEmitted { .. } | StreamState::Uninitialized => {}, - s => return Err(LogStoreError::Internal( + StreamState::BarrierEmitted { .. } => {}, + s => return Err( anyhow!( "when any of the stream reaches the end, it should be right after emitting an barrier. Current state: {:?}", - s) + s ) ), } @@ -553,11 +700,16 @@ impl LogStoreRowOpStream { self.barrier_streams.is_empty(), "should not have any pending barrier received stream after barrier emit" ); + if !self.not_started_streams.is_empty() { + return Err(anyhow!( + "a stream has reached the end but some other stream has not started yet" + )); + } if cfg!(debug_assertion) { while let Some((opt, _stream)) = self.row_streams.next().await { if let Some(result) = opt { - return Err(LogStoreError::Internal( - anyhow!("when any of the stream reaches the end, other stream should also reaches the end, but poll result: {:?}", result)) + return Err( + anyhow!("when any of the stream reaches the end, other stream should also reaches the end, but poll result: {:?}", result) ); } } @@ -568,41 +720,47 @@ impl LogStoreRowOpStream { #[cfg(test)] mod tests { + use std::cmp::min; use std::future::poll_fn; + use std::sync::Arc; use std::task::Poll; + use bytes::Bytes; use futures::stream::empty; use futures::{pin_mut, stream, 
StreamExt, TryStreamExt}; use itertools::Itertools; use rand::prelude::SliceRandom; use rand::thread_rng; use risingwave_common::array::{Op, StreamChunk}; + use risingwave_common::buffer::Bitmap; + use risingwave_common::hash::VirtualNode; use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::DataType; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; - use risingwave_hummock_sdk::key::{FullKey, TableKey}; + use risingwave_hummock_sdk::key::FullKey; use risingwave_storage::store::StateStoreReadIterStream; use risingwave_storage::table::DEFAULT_VNODE; use tokio::sync::oneshot; use tokio::sync::oneshot::Sender; - use crate::common::log_store::kv_log_store::serde::{ - new_log_store_item_stream, LogStoreRowOp, LogStoreRowOpStream, LogStoreRowSerde, + use crate::common::log_store_impl::kv_log_store::serde::{ + merge_log_store_item_stream, KvLogStoreItem, LogStoreRowOp, LogStoreRowOpStream, + LogStoreRowSerde, }; - use crate::common::log_store::kv_log_store::test_utils::{ - gen_test_data, gen_test_log_store_table, TEST_TABLE_ID, + use crate::common::log_store_impl::kv_log_store::test_utils::{ + check_rows_eq, gen_test_data, gen_test_log_store_table, TEST_TABLE_ID, }; - use crate::common::log_store::kv_log_store::SeqIdType; - use crate::common::log_store::LogStoreReadItem; + use crate::common::log_store_impl::kv_log_store::{KvLogStoreReadMetrics, SeqIdType}; - const EPOCH1: u64 = 233; + const EPOCH0: u64 = 233; + const EPOCH1: u64 = EPOCH0 + 1; const EPOCH2: u64 = EPOCH1 + 1; #[test] fn test_serde() { let table = gen_test_log_store_table(); - let serde = LogStoreRowSerde::new(&table, None); + let serde = LogStoreRowSerde::new(&table, Some(Arc::new(Bitmap::ones(VirtualNode::COUNT)))); let (ops, rows) = gen_test_data(0); @@ -618,10 +776,16 @@ mod tests { let mut serialized_keys = vec![]; let mut seq_id = 1; - let delete_range_right1 = serde.serialize_truncation_offset_watermark(DEFAULT_VNODE, epoch); + fn remove_vnode_prefix(key: &Bytes) -> 
Bytes { + key.slice(VirtualNode::SIZE..) + } + let delete_range_right1 = remove_vnode_prefix( + &serde.serialize_truncation_offset_watermark(DEFAULT_VNODE, (epoch, None)), + ); for (op, row) in stream_chunk.rows() { let (_, key, value) = serde.serialize_data_row(epoch, seq_id, op, row); + let key = remove_vnode_prefix(&key.0); assert!(key < delete_range_right1); serialized_keys.push(key); let (decoded_epoch, row_op) = serde.deserialize(value).unwrap(); @@ -640,6 +804,7 @@ mod tests { } let (key, encoded_barrier) = serde.serialize_barrier(epoch, DEFAULT_VNODE, false); + let key = remove_vnode_prefix(&key.0); match serde.deserialize(encoded_barrier).unwrap() { (decoded_epoch, LogStoreRowOp::Barrier { is_checkpoint }) => { assert!(!is_checkpoint); @@ -647,16 +812,19 @@ mod tests { } _ => unreachable!(), } - assert!(key < delete_range_right1); + assert!(key.as_ref() < delete_range_right1); serialized_keys.push(key); seq_id = 1; epoch += 1; - let delete_range_right2 = serde.serialize_truncation_offset_watermark(DEFAULT_VNODE, epoch); + let delete_range_right2 = remove_vnode_prefix( + &serde.serialize_truncation_offset_watermark(DEFAULT_VNODE, (epoch, None)), + ); for (op, row) in stream_chunk.rows() { let (_, key, value) = serde.serialize_data_row(epoch, seq_id, op, row); + let key = remove_vnode_prefix(&key.0); assert!(key >= delete_range_right1); assert!(key < delete_range_right2); serialized_keys.push(key); @@ -676,6 +844,7 @@ mod tests { } let (key, encoded_checkpoint_barrier) = serde.serialize_barrier(epoch, DEFAULT_VNODE, true); + let key = remove_vnode_prefix(&key.0); match serde.deserialize(encoded_checkpoint_barrier).unwrap() { (decoded_epoch, LogStoreRowOp::Barrier { is_checkpoint }) => { assert_eq!(decoded_epoch, epoch); @@ -683,8 +852,8 @@ mod tests { } _ => unreachable!(), } - assert!(key >= delete_range_right1); - assert!(key < delete_range_right2); + assert!(key.as_ref() >= delete_range_right1); + assert!(key.as_ref() < delete_range_right2); 
serialized_keys.push(key); assert_eq!(serialized_keys.len(), 2 * rows.len() + 2); @@ -721,8 +890,7 @@ mod tests { #[tokio::test] async fn test_deserialize_stream_chunk() { let table = gen_test_log_store_table(); - let serde = LogStoreRowSerde::new(&table, None); - + let serde = LogStoreRowSerde::new(&table, Some(Arc::new(Bitmap::ones(VirtualNode::COUNT)))); let (ops, rows) = gen_test_data(0); let mut seq_id = 1; @@ -738,7 +906,13 @@ mod tests { let end_seq_id = seq_id - 1; tx.send(()).unwrap(); let chunk = serde - .deserialize_stream_chunk(stream, start_seq_id, end_seq_id, EPOCH1) + .deserialize_stream_chunk( + stream, + start_seq_id, + end_seq_id, + EPOCH1, + &KvLogStoreReadMetrics::for_test(), + ) .await .unwrap(); for (i, (op, row)) in chunk.rows().enumerate() { @@ -761,7 +935,7 @@ mod tests { .map(|(op, row)| { let (_, key, value) = serde.serialize_data_row(epoch, *seq_id, op, row); *seq_id += 1; - Ok((FullKey::new(TEST_TABLE_ID, TableKey(key), epoch), value)) + Ok((FullKey::new(TEST_TABLE_ID, key, epoch), value)) }) .collect_vec(); ( @@ -782,25 +956,34 @@ mod tests { impl StateStoreReadIterStream, oneshot::Sender<()>, oneshot::Sender<()>, + Vec, + Vec, ) { let (ops, rows) = gen_test_data(base); + let first_barrier = { + let (key, value) = serde.serialize_barrier(EPOCH0, DEFAULT_VNODE, true); + Ok((FullKey::new(TEST_TABLE_ID, key, EPOCH0), value)) + }; + let stream = stream::once(async move { first_barrier }); let (row_stream, tx1) = gen_row_stream(serde.clone(), ops.clone(), rows.clone(), EPOCH1, seq_id); - let stream = row_stream.chain(stream::once({ + let stream = stream.chain(row_stream); + let stream = stream.chain(stream::once({ let serde = serde.clone(); async move { let (key, value) = serde.serialize_barrier(EPOCH1, DEFAULT_VNODE, false); - Ok((FullKey::new(TEST_TABLE_ID, TableKey(key), EPOCH1), value)) + Ok((FullKey::new(TEST_TABLE_ID, key, EPOCH1), value)) } })); - let (row_stream, tx2) = gen_row_stream(serde.clone(), ops, rows, EPOCH2, seq_id); + let 
(row_stream, tx2) = + gen_row_stream(serde.clone(), ops.clone(), rows.clone(), EPOCH2, seq_id); let stream = stream.chain(row_stream).chain(stream::once({ async move { let (key, value) = serde.serialize_barrier(EPOCH2, DEFAULT_VNODE, true); - Ok((FullKey::new(TEST_TABLE_ID, TableKey(key), EPOCH2), value)) + Ok((FullKey::new(TEST_TABLE_ID, key, EPOCH2), value)) } })); - (stream, tx1, tx2) + (stream, tx1, tx2, ops, rows) } #[allow(clippy::type_complexity)] @@ -818,17 +1001,19 @@ mod tests { let mut streams = Vec::new(); let mut tx1 = Vec::new(); let mut tx2 = Vec::new(); + let mut ops = Vec::new(); + let mut rows = Vec::new(); for i in 0..size { - let (s, t1, t2) = gen_single_test_stream(serde.clone(), &mut seq_id, (100 * i) as _); + let (s, t1, t2, op_list, row_list) = + gen_single_test_stream(serde.clone(), &mut seq_id, (100 * i) as _); streams.push(s); tx1.push(Some(t1)); tx2.push(Some(t2)); + ops.push(op_list); + rows.push(row_list); } - let stream = LogStoreRowOpStream::new(streams, serde); - - let mut ops = Vec::new(); - let mut rows = Vec::new(); + let stream = LogStoreRowOpStream::new(streams, serde, KvLogStoreReadMetrics::for_test()); for i in 0..size { let (o, r) = gen_test_data((100 * i) as _); @@ -843,20 +1028,35 @@ mod tests { async fn test_row_stream_basic() { let table = gen_test_log_store_table(); - let serde = LogStoreRowSerde::new(&table, None); + let serde = LogStoreRowSerde::new(&table, Some(Arc::new(Bitmap::ones(VirtualNode::COUNT)))); const MERGE_SIZE: usize = 10; - let (stream, mut tx1, mut tx2, ops, rows) = gen_multi_test_stream(serde, MERGE_SIZE); + let (mut stream, mut tx1, mut tx2, ops, rows) = gen_multi_test_stream(serde, MERGE_SIZE); + + stream.init().await.unwrap(); pin_mut!(stream); + let (epoch, op, _) = stream.next_op().await.unwrap().unwrap(); + + assert_eq!( + ( + EPOCH0, + LogStoreRowOp::Barrier { + is_checkpoint: true + } + ), + (epoch, op) + ); + let mut index = (0..MERGE_SIZE).collect_vec(); index.shuffle(&mut thread_rng()); for 
i in index { tx1[i].take().unwrap().send(()).unwrap(); for j in 0..ops[i].len() { + let (epoch, op, _) = stream.next_op().await.unwrap().unwrap(); assert_eq!( ( EPOCH1, @@ -865,11 +1065,13 @@ mod tests { row: rows[i][j].clone(), } ), - stream.next_op().await.unwrap().unwrap() + (epoch, op) ); } } + let (epoch, op, _) = stream.next_op().await.unwrap().unwrap(); + assert_eq!( ( EPOCH1, @@ -877,7 +1079,7 @@ mod tests { is_checkpoint: false } ), - stream.next_op().await.unwrap().unwrap() + (epoch, op) ); let mut index = (0..MERGE_SIZE).collect_vec(); @@ -886,6 +1088,7 @@ mod tests { for i in index { tx2[i].take().unwrap().send(()).unwrap(); for j in 0..ops[i].len() { + let (epoch, op, _) = stream.next_op().await.unwrap().unwrap(); assert_eq!( ( EPOCH2, @@ -894,11 +1097,12 @@ mod tests { row: rows[i][j].clone(), } ), - stream.next_op().await.unwrap().unwrap() + (epoch, op) ); } } + let (epoch, op, _) = stream.next_op().await.unwrap().unwrap(); assert_eq!( ( EPOCH2, @@ -906,7 +1110,7 @@ mod tests { is_checkpoint: true, } ), - stream.next_op().await.unwrap().unwrap() + (epoch, op) ); assert!(stream.next_op().await.unwrap().is_none()); @@ -916,58 +1120,65 @@ mod tests { async fn test_log_store_stream_basic() { let table = gen_test_log_store_table(); - let serde = LogStoreRowSerde::new(&table, None); + let serde = LogStoreRowSerde::new(&table, Some(Arc::new(Bitmap::ones(VirtualNode::COUNT)))); let mut seq_id = 1; - let (stream, tx1, tx2) = gen_single_test_stream(serde.clone(), &mut seq_id, 0); - let (ops, rows) = gen_test_data(0); + let (stream, tx1, tx2, ops, rows) = gen_single_test_stream(serde.clone(), &mut seq_id, 0); const CHUNK_SIZE: usize = 3; - let stream = new_log_store_item_stream(vec![stream], serde, CHUNK_SIZE); + let stream = merge_log_store_item_stream( + vec![stream], + serde, + CHUNK_SIZE, + KvLogStoreReadMetrics::for_test(), + ); pin_mut!(stream); + let (epoch, item): (_, KvLogStoreItem) = stream.try_next().await.unwrap().unwrap(); + assert_eq!(EPOCH0, 
epoch); + match item { + KvLogStoreItem::StreamChunk(_) => unreachable!(), + KvLogStoreItem::Barrier { is_checkpoint } => { + assert!(is_checkpoint); + } + } + assert!(poll_fn(|cx| Poll::Ready(stream.poll_next_unpin(cx))) .await .is_pending()); tx1.send(()).unwrap(); - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); - assert_eq!(EPOCH1, epoch); - match item { - LogStoreReadItem::StreamChunk(chunk) => { - assert_eq!(chunk.cardinality(), CHUNK_SIZE); - for (i, (op, row)) in chunk.rows().enumerate() { - assert_eq!(op, ops[i]); - assert_eq!(row.to_owned_row(), rows[i]); - } - } - _ => unreachable!(), - } - - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); - assert_eq!(EPOCH1, epoch); - match item { - LogStoreReadItem::StreamChunk(chunk) => { - assert_eq!(chunk.cardinality(), ops.len() - CHUNK_SIZE); - for (i, (op, row)) in chunk.rows().skip(CHUNK_SIZE).enumerate() { - assert_eq!(op, ops[i + CHUNK_SIZE]); - assert_eq!(row.to_owned_row(), rows[i + CHUNK_SIZE]); + { + let mut remain = ops.len(); + while remain > 0 { + let size = min(remain, CHUNK_SIZE); + let start_index = ops.len() - remain; + remain -= size; + let (epoch, item): (_, KvLogStoreItem) = stream.try_next().await.unwrap().unwrap(); + assert_eq!(EPOCH1, epoch); + match item { + KvLogStoreItem::StreamChunk(chunk) => { + assert_eq!(chunk.cardinality(), size); + assert!(check_rows_eq( + chunk.rows(), + (start_index..(start_index + size)).map(|i| (ops[i], &rows[i])) + )); + } + _ => unreachable!(), } } - _ => unreachable!(), } - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); + let (epoch, item): (_, KvLogStoreItem) = stream.try_next().await.unwrap().unwrap(); assert_eq!(EPOCH1, epoch); match item { - LogStoreReadItem::StreamChunk(_) => unreachable!(), - LogStoreReadItem::Barrier { is_checkpoint } => { + KvLogStoreItem::StreamChunk(_) => unreachable!(), + KvLogStoreItem::Barrier { is_checkpoint } => { 
assert!(!is_checkpoint); } - _ => unreachable!(), } assert!(poll_fn(|cx| Poll::Ready(stream.poll_next_unpin(cx))) @@ -976,40 +1187,34 @@ mod tests { tx2.send(()).unwrap(); - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); - assert_eq!(EPOCH2, epoch); - match item { - LogStoreReadItem::StreamChunk(chunk) => { - assert_eq!(chunk.cardinality(), CHUNK_SIZE); - for (i, (op, row)) in chunk.rows().enumerate() { - assert_eq!(op, ops[i]); - assert_eq!(row.to_owned_row(), rows[i]); - } - } - _ => unreachable!(), - } - - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); - assert_eq!(EPOCH2, epoch); - match item { - LogStoreReadItem::StreamChunk(chunk) => { - assert_eq!(chunk.cardinality(), ops.len() - CHUNK_SIZE); - for (i, (op, row)) in chunk.rows().skip(CHUNK_SIZE).enumerate() { - assert_eq!(op, ops[i + CHUNK_SIZE]); - assert_eq!(row.to_owned_row(), rows[i + CHUNK_SIZE]); + { + let mut remain = ops.len(); + while remain > 0 { + let size = min(remain, CHUNK_SIZE); + let start_index = ops.len() - remain; + remain -= size; + let (epoch, item): (_, KvLogStoreItem) = stream.try_next().await.unwrap().unwrap(); + assert_eq!(EPOCH2, epoch); + match item { + KvLogStoreItem::StreamChunk(chunk) => { + assert_eq!(chunk.cardinality(), size); + assert!(check_rows_eq( + chunk.rows(), + (start_index..(start_index + size)).map(|i| (ops[i], &rows[i])) + )); + } + _ => unreachable!(), } } - _ => unreachable!(), } - let (epoch, item): (_, LogStoreReadItem) = stream.try_next().await.unwrap().unwrap(); + let (epoch, item): (_, KvLogStoreItem) = stream.try_next().await.unwrap().unwrap(); assert_eq!(EPOCH2, epoch); match item { - LogStoreReadItem::StreamChunk(_) => unreachable!(), - LogStoreReadItem::Barrier { is_checkpoint } => { + KvLogStoreItem::StreamChunk(_) => unreachable!(), + KvLogStoreItem::Barrier { is_checkpoint } => { assert!(is_checkpoint); } - _ => unreachable!(), } assert!(stream.next().await.is_none()); @@ 
-1019,11 +1224,16 @@ mod tests { async fn test_empty_stream() { let table = gen_test_log_store_table(); - let serde = LogStoreRowSerde::new(&table, None); + let serde = LogStoreRowSerde::new(&table, Some(Arc::new(Bitmap::ones(VirtualNode::COUNT)))); const CHUNK_SIZE: usize = 3; - let stream = new_log_store_item_stream(vec![empty(), empty()], serde, CHUNK_SIZE); + let stream = merge_log_store_item_stream( + vec![empty(), empty()], + serde, + CHUNK_SIZE, + KvLogStoreReadMetrics::for_test(), + ); pin_mut!(stream); diff --git a/src/stream/src/common/log_store_impl/kv_log_store/test_utils.rs b/src/stream/src/common/log_store_impl/kv_log_store/test_utils.rs new file mode 100644 index 0000000000000..809b5b42129d2 --- /dev/null +++ b/src/stream/src/common/log_store_impl/kv_log_store/test_utils.rs @@ -0,0 +1,207 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use itertools::{zip_eq, Itertools}; +use rand::RngCore; +use risingwave_common::array::{Op, RowRef, StreamChunk}; +use risingwave_common::buffer::{Bitmap, BitmapBuilder}; +use risingwave_common::catalog::{ColumnDesc, ColumnId, TableId}; +use risingwave_common::constants::log_store::KV_LOG_STORE_PREDEFINED_COLUMNS; +use risingwave_common::hash::VirtualNode; +use risingwave_common::row::{OwnedRow, Row}; +use risingwave_common::types::{DataType, ScalarImpl, ScalarRef}; +use risingwave_common::util::chunk_coalesce::DataChunkBuilder; +use risingwave_common::util::sort_util::OrderType; +use risingwave_pb::catalog::PbTable; + +use crate::common::table::test_utils::gen_prost_table_with_dist_key; + +pub(crate) const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; +pub(crate) const TEST_DATA_SIZE: usize = 10; + +pub(crate) fn gen_test_data(base: i64) -> (Vec, Vec) { + gen_sized_test_data(base, TEST_DATA_SIZE) +} + +pub(crate) fn gen_sized_test_data(base: i64, max_count: usize) -> (Vec, Vec) { + let mut ops = Vec::new(); + let mut rows = Vec::new(); + while ops.len() < max_count - 1 { + let index = ops.len() as i64; + match rand::thread_rng().next_u32() % 3 { + 0 => { + ops.push(Op::Insert); + rows.push(OwnedRow::new(vec![ + Some(ScalarImpl::Int64(index + base)), + Some(ScalarImpl::Utf8( + format!("name{}", index).as_str().to_owned_scalar(), + )), + ])); + } + 1 => { + ops.push(Op::Delete); + rows.push(OwnedRow::new(vec![ + Some(ScalarImpl::Int64(index + base)), + Some(ScalarImpl::Utf8( + format!("name{}", index).as_str().to_owned_scalar(), + )), + ])); + } + 2 => { + ops.push(Op::UpdateDelete); + rows.push(OwnedRow::new(vec![ + Some(ScalarImpl::Int64(index + base)), + Some(ScalarImpl::Utf8( + format!("name{}", index).as_str().to_owned_scalar(), + )), + ])); + ops.push(Op::UpdateInsert); + rows.push(OwnedRow::new(vec![ + Some(ScalarImpl::Int64(index + base)), + Some(ScalarImpl::Utf8( + format!("name{}", index + 1).as_str().to_owned_scalar(), + )), + ])); + } + _ => 
unreachable!(), + } + } + (ops, rows) +} + +pub(crate) fn test_payload_schema() -> Vec { + vec![ + ColumnDesc::unnamed(ColumnId::from(3), DataType::Int64), // id + ColumnDesc::unnamed(ColumnId::from(4), DataType::Varchar), // name + ] +} + +pub(crate) fn test_log_store_table_schema() -> Vec { + let mut column_descs = vec![ + ColumnDesc::unnamed(ColumnId::from(0), DataType::Int64), // epoch + ColumnDesc::unnamed(ColumnId::from(1), DataType::Int32), // Seq id + ColumnDesc::unnamed(ColumnId::from(2), DataType::Int16), // op code + ]; + column_descs.extend(test_payload_schema()); + column_descs +} + +pub(crate) fn gen_stream_chunk(base: i64) -> StreamChunk { + let (ops, rows) = gen_test_data(base); + let mut builder = DataChunkBuilder::new( + test_payload_schema() + .iter() + .map(|col| col.data_type.clone()) + .collect_vec(), + 1000000, + ); + for row in &rows { + assert!(builder.append_one_row(row).is_none()); + } + let data_chunk = builder.consume_all().unwrap(); + StreamChunk::from_parts(ops, data_chunk) +} + +pub(crate) fn gen_multi_vnode_stream_chunks( + base: i64, + max_count: usize, +) -> [StreamChunk; MOD_COUNT] { + let mut data_builder = (0..MOD_COUNT) + .map(|_| { + ( + Vec::new() as Vec, + DataChunkBuilder::new( + test_payload_schema() + .iter() + .map(|col| col.data_type.clone()) + .collect_vec(), + max_count, + ), + ) + }) + .collect_vec(); + let (ops, rows) = gen_sized_test_data(base, max_count); + for (op, row) in zip_eq(ops, rows) { + let vnode = VirtualNode::compute_row(&row, &[TEST_SCHEMA_DIST_KEY_INDEX]); + let (ops, builder) = &mut data_builder[vnode.to_index() % MOD_COUNT]; + ops.push(op); + assert!(builder.append_one_row(row).is_none()); + } + + data_builder + .into_iter() + .map(|(ops, mut builder)| StreamChunk::from_parts(ops, builder.consume_all().unwrap())) + .collect_vec() + .try_into() + .unwrap() +} + +pub(crate) const TEST_SCHEMA_DIST_KEY_INDEX: usize = 0; + +pub(crate) fn gen_test_log_store_table() -> PbTable { + let schema = 
test_log_store_table_schema(); + let order_types = vec![OrderType::ascending(), OrderType::ascending_nulls_last()]; + let pk_index = vec![0_usize, 1_usize]; + let read_prefix_len_hint = 0; + gen_prost_table_with_dist_key( + TEST_TABLE_ID, + schema, + order_types, + pk_index, + read_prefix_len_hint, + vec![TEST_SCHEMA_DIST_KEY_INDEX + KV_LOG_STORE_PREDEFINED_COLUMNS.len()], // id field + ) +} + +pub(crate) fn calculate_vnode_bitmap<'a>( + test_data: impl Iterator)>, +) -> Bitmap { + let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); + for vnode in + test_data.map(|(_, row)| VirtualNode::compute_row(row, &[TEST_SCHEMA_DIST_KEY_INDEX])) + { + builder.set(vnode.to_index(), true); + } + builder.finish() +} + +pub(crate) fn check_rows_eq( + first: impl Iterator, + second: impl Iterator, +) -> bool { + for ((op1, row1), (op2, row2)) in zip_eq( + first.sorted_by_key(|(_, row)| { + row.datum_at(TEST_SCHEMA_DIST_KEY_INDEX) + .unwrap() + .into_int64() + }), + second.sorted_by_key(|(_, row)| { + row.datum_at(TEST_SCHEMA_DIST_KEY_INDEX) + .unwrap() + .into_int64() + }), + ) { + if op1 != op2 { + return false; + } + if row1.to_owned_row() != row2.to_owned_row() { + return false; + } + } + true +} + +pub(crate) fn check_stream_chunk_eq(first: &StreamChunk, second: &StreamChunk) -> bool { + check_rows_eq(first.rows(), second.rows()) +} diff --git a/src/stream/src/common/log_store/kv_log_store/writer.rs b/src/stream/src/common/log_store_impl/kv_log_store/writer.rs similarity index 83% rename from src/stream/src/common/log_store/kv_log_store/writer.rs rename to src/stream/src/common/log_store_impl/kv_log_store/writer.rs index 54d7db38b8570..1e6e8681fcd77 100644 --- a/src/stream/src/common/log_store/kv_log_store/writer.rs +++ b/src/stream/src/common/log_store_impl/kv_log_store/writer.rs @@ -19,14 +19,17 @@ use bytes::Bytes; use risingwave_common::array::StreamChunk; use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::catalog::TableId; +use 
risingwave_common::estimate_size::EstimateSize; use risingwave_common::hash::{VirtualNode, VnodeBitmapExt}; use risingwave_common::util::epoch::EpochPair; +use risingwave_connector::sink::log_store::{LogStoreResult, LogWriter}; use risingwave_storage::store::{InitOptions, LocalStateStore}; -use crate::common::log_store::kv_log_store::buffer::LogStoreBufferSender; -use crate::common::log_store::kv_log_store::serde::LogStoreRowSerde; -use crate::common::log_store::kv_log_store::{SeqIdType, FIRST_SEQ_ID}; -use crate::common::log_store::{LogStoreResult, LogWriter}; +use crate::common::log_store_impl::kv_log_store::buffer::LogStoreBufferSender; +use crate::common::log_store_impl::kv_log_store::serde::LogStoreRowSerde; +use crate::common::log_store_impl::kv_log_store::{ + FlushInfo, KvLogStoreMetrics, SeqIdType, FIRST_SEQ_ID, +}; pub struct KvLogStoreWriter { _table_id: TableId, @@ -38,6 +41,8 @@ pub struct KvLogStoreWriter { serde: LogStoreRowSerde, tx: LogStoreBufferSender, + + metrics: KvLogStoreMetrics, } impl KvLogStoreWriter { @@ -46,6 +51,7 @@ impl KvLogStoreWriter { state_store: LS, serde: LogStoreRowSerde, tx: LogStoreBufferSender, + metrics: KvLogStoreMetrics, ) -> Self { Self { _table_id: table_id, @@ -53,6 +59,7 @@ impl KvLogStoreWriter { state_store, serde, tx, + metrics, } } } @@ -68,7 +75,9 @@ impl LogWriter for KvLogStoreWriter { } async fn write_chunk(&mut self, chunk: StreamChunk) -> LogStoreResult<()> { - assert!(chunk.cardinality() > 0); + if chunk.cardinality() == 0 { + return Ok(()); + } let epoch = self.state_store.epoch(); let start_seq_id = self.seq_id; self.seq_id += chunk.cardinality() as SeqIdType; @@ -80,13 +89,16 @@ impl LogWriter for KvLogStoreWriter { // When enter this branch, the chunk cannot be added directly, and should be add to // state store and flush let mut vnode_bitmap_builder = BitmapBuilder::zeroed(VirtualNode::COUNT); + let mut flush_info = FlushInfo::new(); for (i, (op, row)) in chunk.rows().enumerate() { let seq_id = 
start_seq_id + (i as SeqIdType); assert!(seq_id <= end_seq_id); let (vnode, key, value) = self.serde.serialize_data_row(epoch, seq_id, op, row); vnode_bitmap_builder.set(vnode.to_index(), true); + flush_info.flush_one(key.estimated_size() + value.estimated_size()); self.state_store.insert(key, value, None)?; } + flush_info.report(&self.metrics); self.state_store.flush(Vec::new()).await?; let vnode_bitmap = vnode_bitmap_builder.finish(); @@ -102,8 +114,10 @@ impl LogWriter for KvLogStoreWriter { is_checkpoint: bool, ) -> LogStoreResult<()> { let epoch = self.state_store.epoch(); + let mut flush_info = FlushInfo::new(); for vnode in self.serde.vnodes().iter_vnodes() { let (key, value) = self.serde.serialize_barrier(epoch, vnode, is_checkpoint); + flush_info.flush_one(key.estimated_size() + value.estimated_size()); self.state_store.insert(key, value, None)?; } self.tx @@ -112,12 +126,14 @@ impl LogWriter for KvLogStoreWriter { let seq_id = start_seq_id + (i as SeqIdType); assert!(seq_id <= end_seq_id); let (_, key, value) = self.serde.serialize_data_row(epoch, seq_id, op, row); + flush_info.flush_one(key.estimated_size() + value.estimated_size()); self.state_store.insert(key, value, None)?; } Ok(()) })?; + flush_info.report(&self.metrics); let mut delete_range = Vec::with_capacity(self.serde.vnodes().count_ones()); - if let Some(truncation_offset) = self.tx.pop_truncation() { + if let Some(truncation_offset) = self.tx.pop_truncation(epoch) { for vnode in self.serde.vnodes().iter_vnodes() { let range_begin = Bytes::from(vnode.to_be_bytes().to_vec()); let range_end = self diff --git a/src/stream/src/common/log_store_impl/mod.rs b/src/stream/src/common/log_store_impl/mod.rs new file mode 100644 index 0000000000000..633fa07f2617d --- /dev/null +++ b/src/stream/src/common/log_store_impl/mod.rs @@ -0,0 +1,16 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with 
the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod in_mem; +pub mod kv_log_store; diff --git a/src/stream/src/common/mod.rs b/src/stream/src/common/mod.rs index e865214cb0990..7f5111c29e03e 100644 --- a/src/stream/src/common/mod.rs +++ b/src/stream/src/common/mod.rs @@ -18,6 +18,6 @@ pub use column_mapping::*; mod builder; pub mod cache; mod column_mapping; -pub mod log_store; +pub mod log_store_impl; pub mod metrics; pub mod table; diff --git a/src/stream/src/common/table/state_table.rs b/src/stream/src/common/table/state_table.rs index dfdd11732c942..37e788a3e7abd 100644 --- a/src/stream/src/common/table/state_table.rs +++ b/src/stream/src/common/table/state_table.rs @@ -17,11 +17,12 @@ use std::ops::Bound::*; use std::sync::Arc; use bytes::{BufMut, Bytes, BytesMut}; +use either::Either; use futures::{pin_mut, FutureExt, Stream, StreamExt}; use futures_async_stream::for_await; use itertools::{izip, Itertools}; use risingwave_common::array::stream_record::Record; -use risingwave_common::array::{Op, StreamChunk, Vis}; +use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::buffer::Bitmap; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::{get_dist_key_in_pk_indices, ColumnDesc, TableId, TableOption}; @@ -34,7 +35,8 @@ use risingwave_common::util::row_serde::OrderedRowSerde; use risingwave_common::util::sort_util::OrderType; use risingwave_common::util::value_encoding::BasicSerde; use risingwave_hummock_sdk::key::{ - end_bound_of_prefix, next_key, prefixed_range, range_of_prefix, start_bound_of_excluded_prefix, + 
end_bound_of_prefix, map_table_key_range, next_key, prefixed_range, range_of_prefix, + start_bound_of_excluded_prefix, TableKey, }; use risingwave_pb::catalog::Table; use risingwave_storage::error::{StorageError, StorageResult}; @@ -560,8 +562,8 @@ where &self.value_indices } - pub fn is_dirty(&self) -> bool { - self.local_store.is_dirty() + fn is_dirty(&self) -> bool { + self.local_store.is_dirty() || self.state_clean_watermark.is_some() } pub fn vnode_bitmap(&self) -> &Bitmap { @@ -716,21 +718,26 @@ where } } - fn insert_inner(&mut self, key_bytes: Bytes, value_bytes: Bytes) { + fn insert_inner(&mut self, key: TableKey, value_bytes: Bytes) { self.local_store - .insert(key_bytes, value_bytes, None) + .insert(key, value_bytes, None) .unwrap_or_else(|e| self.handle_mem_table_error(e)); } - fn delete_inner(&mut self, key_bytes: Bytes, value_bytes: Bytes) { + fn delete_inner(&mut self, key: TableKey, value_bytes: Bytes) { self.local_store - .delete(key_bytes, value_bytes) + .delete(key, value_bytes) .unwrap_or_else(|e| self.handle_mem_table_error(e)); } - fn update_inner(&mut self, key_bytes: Bytes, old_value_bytes: Bytes, new_value_bytes: Bytes) { + fn update_inner( + &mut self, + key_bytes: TableKey, + old_value_bytes: Option, + new_value_bytes: Bytes, + ) { self.local_store - .insert(key_bytes, new_value_bytes, Some(old_value_bytes)) + .insert(key_bytes, new_value_bytes, old_value_bytes) .unwrap_or_else(|e| self.handle_mem_table_error(e)); } @@ -776,7 +783,19 @@ where let old_value_bytes = self.serialize_value(old_value); let new_value_bytes = self.serialize_value(new_value); - self.update_inner(new_key_bytes, old_value_bytes, new_value_bytes); + self.update_inner(new_key_bytes, Some(old_value_bytes), new_value_bytes); + } + + /// Update a row without giving old value. + /// + /// `is_consistent_op` should be set to false. 
+ pub fn update_without_old_value(&mut self, new_value: impl Row) { + let new_pk = (&new_value).project(self.pk_indices()); + let new_key_bytes = + serialize_pk_with_vnode(new_pk, &self.pk_serde, self.compute_prefix_vnode(new_pk)); + let new_value_bytes = self.serialize_value(new_value); + + self.update_inner(new_key_bytes, None, new_value_bytes); } /// Write a record into state table. Must have the same schema with the table. @@ -825,45 +844,41 @@ where }) .collect_vec(); - let vis = key_chunk.vis(); - match vis { - Vis::Bitmap(vis) => { - for ((op, (key, key_bytes), value), vis) in - izip!(op.iter(), vnode_and_pks, values).zip_eq_debug(vis.iter()) - { - if vis { - match op { - Op::Insert | Op::UpdateInsert => { - if USE_WATERMARK_CACHE && let Some(ref pk) = key { + if !key_chunk.is_compacted() { + for ((op, (key, key_bytes), value), vis) in + izip!(op.iter(), vnode_and_pks, values).zip_eq_debug(key_chunk.visibility().iter()) + { + if vis { + match op { + Op::Insert | Op::UpdateInsert => { + if USE_WATERMARK_CACHE && let Some(ref pk) = key { self.watermark_cache.insert(pk); } - self.insert_inner(key_bytes, value); - } - Op::Delete | Op::UpdateDelete => { - if USE_WATERMARK_CACHE && let Some(ref pk) = key { + self.insert_inner(TableKey(key_bytes), value); + } + Op::Delete | Op::UpdateDelete => { + if USE_WATERMARK_CACHE && let Some(ref pk) = key { self.watermark_cache.delete(pk); } - self.delete_inner(key_bytes, value); - } + self.delete_inner(TableKey(key_bytes), value); } } } } - Vis::Compact(_) => { - for (op, (key, key_bytes), value) in izip!(op.iter(), vnode_and_pks, values) { - match op { - Op::Insert | Op::UpdateInsert => { - if USE_WATERMARK_CACHE && let Some(ref pk) = key { + } else { + for (op, (key, key_bytes), value) in izip!(op.iter(), vnode_and_pks, values) { + match op { + Op::Insert | Op::UpdateInsert => { + if USE_WATERMARK_CACHE && let Some(ref pk) = key { self.watermark_cache.insert(pk); } - self.insert_inner(key_bytes, value); - } - Op::Delete | 
Op::UpdateDelete => { - if USE_WATERMARK_CACHE && let Some(ref pk) = key { + self.insert_inner(TableKey(key_bytes), value); + } + Op::Delete | Op::UpdateDelete => { + if USE_WATERMARK_CACHE && let Some(ref pk) = key { self.watermark_cache.delete(pk); } - self.delete_inner(key_bytes, value); - } + self.delete_inner(TableKey(key_bytes), value); } } } @@ -893,9 +908,15 @@ where // Tick the watermark buffer here because state table is expected to be committed once // per epoch. self.watermark_buffer_strategy.tick(); - self.seal_current_epoch(new_epoch.curr) - .instrument(tracing::info_span!("state_table_commit")) - .await?; + if !self.is_dirty() { + // If the state table is not modified, go fast path. + self.local_store.seal_current_epoch(new_epoch.curr); + return Ok(()); + } else { + self.seal_current_epoch(new_epoch.curr) + .instrument(tracing::info_span!("state_table_commit")) + .await?; + } // Refresh watermark cache if it is out of sync. if USE_WATERMARK_CACHE && !self.watermark_cache.is_synced() { @@ -915,7 +936,7 @@ where let mut streams = vec![]; for vnode in self.vnodes().iter_vnodes() { let stream = self - .iter_row_with_pk_range(&range, vnode, PrefetchOptions::default()) + .iter_with_vnode(vnode, &range, PrefetchOptions::default()) .await?; streams.push(Box::pin(stream)); } @@ -1073,38 +1094,16 @@ where S: StateStore, SD: ValueRowSerde, { - /// This function scans rows from the relational table. - pub async fn iter_row( - &self, - prefetch_options: PrefetchOptions, - ) -> StreamExecutorResult> { - self.iter_row_with_pk_prefix(row::empty(), prefetch_options) - .await - } - - /// This function scans rows from the relational table with specific `pk_prefix`. - /// `pk_prefix` is used to identify the exact vnode the scan should perform on. 
- pub async fn iter_row_with_pk_prefix( - &self, - pk_prefix: impl Row, - prefetch_options: PrefetchOptions, - ) -> StreamExecutorResult> { - Ok(deserialize_keyed_row_stream( - self.iter_kv_with_pk_prefix(pk_prefix, prefetch_options) - .await?, - &self.row_serde, - )) - } - /// This function scans rows from the relational table with specific `pk_range` under the same /// `vnode`. - pub async fn iter_row_with_pk_range( + pub async fn iter_with_vnode( &self, - pk_range: &(Bound, Bound), + // Optional vnode that returns an iterator only over the given range under that vnode. // For now, we require this parameter, and will panic. In the future, when `None`, we can // iterate over each vnode that the `StateTableInner` owns. vnode: VirtualNode, + pk_range: &(Bound, Bound), prefetch_options: PrefetchOptions, ) -> StreamExecutorResult> { Ok(deserialize_keyed_row_stream( @@ -1129,17 +1128,23 @@ where prefetch_options, cache_policy: CachePolicy::Fill(CachePriority::High), }; + let table_key_range = map_table_key_range(key_range); - Ok(self.local_store.iter(key_range, read_options).await?) + Ok(self.local_store.iter(table_key_range, read_options).await?) } - /// This function scans raw key-values from the relational table with specific `pk_prefix`. + /// This function scans rows from the relational table with specific `prefix` and `sub_range` under the same + /// `vnode`. If `sub_range` is (Unbounded, Unbounded), it scans rows from the relational table with specific `pk_prefix`. /// `pk_prefix` is used to identify the exact vnode the scan should perform on. - async fn iter_kv_with_pk_prefix( + + /// This function scans rows from the relational table with specific `prefix` and `pk_sub_range` under the same + /// `vnode`. 
+ pub async fn iter_with_prefix( &self, pk_prefix: impl Row, + sub_range: &(Bound, Bound), prefetch_options: PrefetchOptions, - ) -> StreamExecutorResult<::IterStream<'_>> { + ) -> StreamExecutorResult> { let prefix_serializer = self.pk_serde.prefix(pk_prefix.len()); let encoded_prefix = serialize_pk(&pk_prefix, &prefix_serializer); let encoded_key_range = range_of_prefix(&encoded_prefix); @@ -1174,8 +1179,20 @@ where "storage_iter_with_prefix" ); - self.iter_kv(encoded_key_range_with_vnode, prefix_hint, prefetch_options) - .await + let memcomparable_range = + prefix_and_sub_range_to_memcomparable(&self.pk_serde, sub_range, pk_prefix); + + let memcomparable_range_with_vnode = prefixed_range(memcomparable_range, &vnode); + + Ok(deserialize_keyed_row_stream( + self.iter_kv( + memcomparable_range_with_vnode, + prefix_hint, + prefetch_options, + ) + .await?, + &self.row_serde, + )) } /// This function scans raw key-values from the relational table with specific `pk_range` under @@ -1216,7 +1233,7 @@ where // If this assertion fails, then something must be wrong with the operator implementation or // the distribution derivation from the optimizer. let vnode = self.compute_prefix_vnode(&pk_prefix).to_be_bytes(); - let encoded_key_range_with_vnode = prefixed_range(encoded_key_range, &vnode); + let table_key_range = map_table_key_range(prefixed_range(encoded_key_range, &vnode)); // Construct prefix hint for prefix bloom filter. 
if self.prefix_hint_len != 0 { @@ -1245,7 +1262,7 @@ where }; self.local_store - .may_exist(encoded_key_range_with_vnode, read_options) + .may_exist(table_key_range, read_options) .await .map_err(Into::into) } @@ -1280,15 +1297,38 @@ pub fn prefix_range_to_memcomparable( range: &(Bound, Bound), ) -> (Bound, Bound) { ( - to_memcomparable(pk_serde, &range.0, false), - to_memcomparable(pk_serde, &range.1, true), + start_range_to_memcomparable(pk_serde, &range.0), + end_range_to_memcomparable(pk_serde, &range.1, None), ) } -fn to_memcomparable( +fn prefix_and_sub_range_to_memcomparable( + pk_serde: &OrderedRowSerde, + sub_range: &(Bound, Bound), + pk_prefix: impl Row, +) -> (Bound, Bound) { + let (range_start, range_end) = sub_range; + let prefix_serializer = pk_serde.prefix(pk_prefix.len()); + let serialized_pk_prefix = serialize_pk(&pk_prefix, &prefix_serializer); + let start_range = match range_start { + Included(start_range) => Bound::Included(Either::Left((&pk_prefix).chain(start_range))), + Excluded(start_range) => Bound::Excluded(Either::Left((&pk_prefix).chain(start_range))), + Unbounded => Bound::Included(Either::Right(&pk_prefix)), + }; + let end_range = match range_end { + Included(end_range) => Bound::Included((&pk_prefix).chain(end_range)), + Excluded(end_range) => Bound::Excluded((&pk_prefix).chain(end_range)), + Unbounded => Unbounded, + }; + ( + start_range_to_memcomparable(pk_serde, &start_range), + end_range_to_memcomparable(pk_serde, &end_range, Some(serialized_pk_prefix)), + ) +} + +fn start_range_to_memcomparable( pk_serde: &OrderedRowSerde, bound: &Bound, - is_upper: bool, ) -> Bound { let serialize_pk_prefix = |pk_prefix: &R| { let prefix_serializer = pk_serde.prefix(pk_prefix.len()); @@ -1298,20 +1338,39 @@ fn to_memcomparable( Unbounded => Unbounded, Included(r) => { let serialized = serialize_pk_prefix(r); - if is_upper { - end_bound_of_prefix(&serialized) - } else { - Included(serialized) - } + + Included(serialized) } Excluded(r) => { let 
serialized = serialize_pk_prefix(r); - if !is_upper { - // if lower - start_bound_of_excluded_prefix(&serialized) - } else { - Excluded(serialized) - } + + start_bound_of_excluded_prefix(&serialized) + } + } +} + +fn end_range_to_memcomparable( + pk_serde: &OrderedRowSerde, + bound: &Bound, + serialized_pk_prefix: Option, +) -> Bound { + let serialize_pk_prefix = |pk_prefix: &R| { + let prefix_serializer = pk_serde.prefix(pk_prefix.len()); + serialize_pk(pk_prefix, &prefix_serializer) + }; + match bound { + Unbounded => match serialized_pk_prefix { + Some(serialized_pk_prefix) => end_bound_of_prefix(&serialized_pk_prefix), + None => Unbounded, + }, + Included(r) => { + let serialized = serialize_pk_prefix(r); + + end_bound_of_prefix(&serialized) + } + Excluded(r) => { + let serialized = serialize_pk_prefix(r); + Excluded(serialized) } } } diff --git a/src/stream/src/common/table/state_table_cache.rs b/src/stream/src/common/table/state_table_cache.rs index 156637a41a1a4..b458ef52537e4 100644 --- a/src/stream/src/common/table/state_table_cache.rs +++ b/src/stream/src/common/table/state_table_cache.rs @@ -67,9 +67,9 @@ type WatermarkCacheKey = DefaultOrdered; /// Issue delete ranges. /// /// B. Refreshing the cache: -/// On barrier, do table scan from most_recently_cleaned_watermark (inclusive) to +inf. +/// On barrier, do table scan from `most_recently_cleaned_watermark` (inclusive) to +inf. /// Take the Top N rows and insert into cache. -/// This has to be implemented in state_table. +/// This has to be implemented in `state_table`. /// We do not need to store any values, just the keys. 
/// /// TODO(kwannoel): diff --git a/src/stream/src/common/table/test_state_table.rs b/src/stream/src/common/table/test_state_table.rs index c3e5759a47ae6..7b6d1dce99f21 100644 --- a/src/stream/src/common/table/test_state_table.rs +++ b/src/stream/src/common/table/test_state_table.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::Bound::{self, *}; + use futures::{pin_mut, StreamExt}; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::buffer::Bitmap; @@ -280,8 +282,9 @@ async fn test_state_table_iter_with_prefix() { ])); let pk_prefix = OwnedRow::new(vec![Some(1_i32.into())]); + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); let iter = state_table - .iter_row_with_pk_prefix(&pk_prefix, Default::default()) + .iter_with_prefix(&pk_prefix, sub_range, Default::default()) .await .unwrap(); pin_mut!(iter); @@ -412,7 +415,7 @@ async fn test_state_table_iter_with_pk_range() { std::ops::Bound::Included(OwnedRow::new(vec![Some(4_i32.into())])), ); let iter = state_table - .iter_row_with_pk_range(&pk_range, DEFAULT_VNODE, Default::default()) + .iter_with_vnode(DEFAULT_VNODE, &pk_range, Default::default()) .await .unwrap(); pin_mut!(iter); @@ -437,7 +440,7 @@ async fn test_state_table_iter_with_pk_range() { std::ops::Bound::::Unbounded, ); let iter = state_table - .iter_row_with_pk_range(&pk_range, DEFAULT_VNODE, Default::default()) + .iter_with_vnode(DEFAULT_VNODE, &pk_range, Default::default()) .await .unwrap(); pin_mut!(iter); @@ -574,9 +577,12 @@ async fn test_state_table_iter_with_value_indices() { Some(99_i32.into()), Some(999_i32.into()), ])); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); { - let iter = state_table.iter_row(Default::default()).await.unwrap(); + let iter = state_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await + .unwrap(); pin_mut!(iter); let res = 
iter.next().await.unwrap().unwrap(); @@ -631,7 +637,10 @@ async fn test_state_table_iter_with_value_indices() { Some(888_i32.into()), ])); - let iter = state_table.iter_row(Default::default()).await.unwrap(); + let iter = state_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await + .unwrap(); pin_mut!(iter); let res = iter.next().await.unwrap().unwrap(); @@ -735,9 +744,12 @@ async fn test_state_table_iter_with_shuffle_value_indices() { Some(99_i32.into()), Some(999_i32.into()), ])); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); { - let iter = state_table.iter_row(Default::default()).await.unwrap(); + let iter = state_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await + .unwrap(); pin_mut!(iter); let res = iter.next().await.unwrap().unwrap(); @@ -813,7 +825,10 @@ async fn test_state_table_iter_with_shuffle_value_indices() { Some(888_i32.into()), ])); - let iter = state_table.iter_row(Default::default()).await.unwrap(); + let iter = state_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await + .unwrap(); pin_mut!(iter); let res = iter.next().await.unwrap().unwrap(); @@ -998,9 +1013,13 @@ async fn test_state_table_write_chunk() { ); state_table.write_chunk(chunk); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); let rows: Vec<_> = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await .unwrap() .collect::>() @@ -1108,16 +1127,17 @@ async fn test_state_table_write_chunk_visibility() { &data_types, ); let (ops, columns, _) = chunk.into_inner(); - let chunk = StreamChunk::new( - ops, - columns, - Some(Bitmap::from_iter([true, true, true, false])), - ); + let chunk = + StreamChunk::with_visibility(ops, columns, Bitmap::from_iter([true, true, true, false])); state_table.write_chunk(chunk); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); 
let rows: Vec<_> = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await .unwrap() .collect::>() @@ -1227,9 +1247,13 @@ async fn test_state_table_write_chunk_value_indices() { ); state_table.write_chunk(chunk); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); let rows: Vec<_> = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await .unwrap() .collect::>() @@ -1509,9 +1533,13 @@ async fn test_state_table_watermark_cache_ignore_null() { let chunk = StreamChunk::from_rows(&rows, &data_types); state_table.write_chunk(chunk); - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); let inserted_rows: Vec<_> = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await .unwrap() .collect::>() @@ -1796,9 +1824,13 @@ async fn test_state_table_watermark_cache_refill() { for row in &rows { state_table.insert(row); } - + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); let inserted_rows: Vec<_> = state_table - .iter_row(PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + row::empty(), + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await .unwrap() .collect::>() @@ -1833,3 +1865,183 @@ async fn test_state_table_watermark_cache_refill() { .as_scalar_ref_impl() ) } + +#[tokio::test] +async fn test_state_table_iter_prefix_and_sub_range() { + const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; + let test_env = prepare_hummock_test_env().await; + + let order_types = vec![OrderType::ascending(), OrderType::ascending()]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_descs = vec![ + ColumnDesc::unnamed(column_ids[0], DataType::Int32), + 
ColumnDesc::unnamed(column_ids[1], DataType::Int32), + ColumnDesc::unnamed(column_ids[2], DataType::Int32), + ]; + let pk_index = vec![0_usize, 1_usize]; + let read_prefix_len_hint = 0; + let table = gen_prost_table( + TEST_TABLE_ID, + column_descs, + order_types, + pk_index, + read_prefix_len_hint, + ); + + test_env.register_table(table.clone()).await; + let mut state_table = + StateTable::from_table_catalog_inconsistent_op(&table, test_env.storage.clone(), None) + .await; + let mut epoch = EpochPair::new_test_epoch(1); + state_table.init_epoch(epoch); + + state_table.insert(OwnedRow::new(vec![ + Some(1_i32.into()), + Some(11_i32.into()), + Some(111_i32.into()), + ])); + state_table.insert(OwnedRow::new(vec![ + Some(1_i32.into()), + Some(22_i32.into()), + Some(222_i32.into()), + ])); + state_table.insert(OwnedRow::new(vec![ + Some(1_i32.into()), + Some(33_i32.into()), + Some(333_i32.into()), + ])); + + state_table.insert(OwnedRow::new(vec![ + Some(4_i32.into()), + Some(44_i32.into()), + Some(444_i32.into()), + ])); + + epoch.inc(); + state_table.commit(epoch).await.unwrap(); + + let pk_prefix = OwnedRow::new(vec![Some(1_i32.into())]); + + let sub_range1 = ( + std::ops::Bound::Included(OwnedRow::new(vec![Some(11_i32.into())])), + std::ops::Bound::Excluded(OwnedRow::new(vec![Some(33_i32.into())])), + ); + + let iter = state_table + .iter_with_prefix(pk_prefix, &sub_range1, Default::default()) + .await + .unwrap(); + + pin_mut!(iter); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(11_i32.into()), + Some(111_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(22_i32.into()), + Some(222_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await; + assert!(res.is_none()); + + let sub_range2: (Bound, Bound) = ( + std::ops::Bound::Excluded(OwnedRow::new(vec![Some(11_i32.into())])), + 
std::ops::Bound::Unbounded, + ); + + let pk_prefix = OwnedRow::new(vec![Some(1_i32.into())]); + let iter = state_table + .iter_with_prefix(pk_prefix, &sub_range2, Default::default()) + .await + .unwrap(); + + pin_mut!(iter); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(22_i32.into()), + Some(222_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(33_i32.into()), + Some(333_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await; + assert!(res.is_none()); + + let sub_range3: (Bound, Bound) = ( + std::ops::Bound::Unbounded, + std::ops::Bound::Included(OwnedRow::new(vec![Some(33_i32.into())])), + ); + + let pk_prefix = OwnedRow::new(vec![Some(1_i32.into())]); + let iter = state_table + .iter_with_prefix(pk_prefix, &sub_range3, Default::default()) + .await + .unwrap(); + + pin_mut!(iter); + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(11_i32.into()), + Some(111_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(22_i32.into()), + Some(222_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await.unwrap().unwrap(); + + assert_eq!( + &OwnedRow::new(vec![ + Some(1_i32.into()), + Some(33_i32.into()), + Some(333_i32.into()), + ]), + res.as_ref() + ); + + let res = iter.next().await; + assert!(res.is_none()); +} diff --git a/src/stream/src/common/table/test_utils.rs b/src/stream/src/common/table/test_utils.rs index 526f6864b3a99..90e7886df26bf 100644 --- a/src/stream/src/common/table/test_utils.rs +++ b/src/stream/src/common/table/test_utils.rs @@ -38,6 +38,26 @@ pub(crate) fn gen_prost_table( ) } +pub(crate) fn gen_prost_table_with_dist_key( + table_id: TableId, + column_descs: Vec, + order_types: 
Vec, + pk_index: Vec, + read_prefix_len_hint: u32, + distribution_key: Vec, +) -> PbTable { + let col_len = column_descs.len() as i32; + gen_prost_table_inner( + table_id, + column_descs, + order_types, + pk_index, + read_prefix_len_hint, + (0..col_len).collect_vec(), + distribution_key, + ) +} + pub(crate) fn gen_prost_table_with_value_indices( table_id: TableId, column_descs: Vec, @@ -45,6 +65,26 @@ pub(crate) fn gen_prost_table_with_value_indices( pk_index: Vec, read_prefix_len_hint: u32, value_indices: Vec, +) -> PbTable { + gen_prost_table_inner( + table_id, + column_descs, + order_types, + pk_index, + read_prefix_len_hint, + value_indices, + Vec::default(), + ) +} + +pub(crate) fn gen_prost_table_inner( + table_id: TableId, + column_descs: Vec, + order_types: Vec, + pk_index: Vec, + read_prefix_len_hint: u32, + value_indices: Vec, + distribution_key: Vec, ) -> PbTable { let prost_pk = pk_index .iter() @@ -62,12 +102,15 @@ pub(crate) fn gen_prost_table_with_value_indices( }) .collect(); + let distribution_key = distribution_key.into_iter().map(|i| i as i32).collect_vec(); + PbTable { id: table_id.table_id(), columns: prost_columns, pk: prost_pk, read_prefix_len_hint, value_indices, + distribution_key, ..Default::default() } } diff --git a/src/stream/src/error.rs b/src/stream/src/error.rs index b737de4d2560b..2930cda31747e 100644 --- a/src/stream/src/error.rs +++ b/src/stream/src/error.rs @@ -16,6 +16,7 @@ use std::backtrace::Backtrace; use risingwave_common::array::ArrayError; use risingwave_connector::error::ConnectorError; +use risingwave_connector::sink::SinkError; use risingwave_expr::ExprError; use risingwave_pb::PbFieldNotFound; use risingwave_storage::error::StorageError; @@ -58,6 +59,9 @@ enum ErrorKind { #[error("Executor error: {0:?}")] Executor(#[source] StreamExecutorError), + #[error("Sink error: {0:?}")] + Sink(#[source] SinkError), + #[error(transparent)] Internal(anyhow::Error), } @@ -115,6 +119,12 @@ impl From for StreamError { } } +impl From 
for StreamError { + fn from(value: SinkError) -> Self { + ErrorKind::Sink(value).into() + } +} + impl From for StreamError { fn from(err: PbFieldNotFound) -> Self { Self::from(anyhow::anyhow!( diff --git a/src/stream/src/executor/actor.rs b/src/stream/src/executor/actor.rs index 17f874acb0fc6..85846557a3c4a 100644 --- a/src/stream/src/executor/actor.rs +++ b/src/stream/src/executor/actor.rs @@ -37,9 +37,11 @@ pub struct ActorContext { pub id: ActorId, pub fragment_id: u32, + // TODO(eric): these seem to be useless now? last_mem_val: Arc, cur_mem_val: Arc, total_mem_val: Arc>, + pub streaming_metrics: Arc, pub error_suppressor: Arc>, } @@ -78,7 +80,8 @@ impl ActorContext { } pub fn on_compute_error(&self, err: ExprError, identity: &str) { - tracing::error!("Compute error: {}, executor: {identity}", err); + tracing::error!(identity, %err, "failed to evaluate expression"); + let executor_name = identity.split(' ').next().unwrap_or("name_not_found"); let mut err_str = err.to_string(); diff --git a/src/stream/src/executor/agg_common.rs b/src/stream/src/executor/agg_common.rs index 56d9fec20d027..fbaa80c3fbeb7 100644 --- a/src/stream/src/executor/agg_common.rs +++ b/src/stream/src/executor/agg_common.rs @@ -15,7 +15,7 @@ use std::collections::HashMap; use std::sync::Arc; -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_storage::StateStore; use super::aggregation::AggStateStorage; @@ -40,7 +40,7 @@ pub struct AggExecutorArgs { pub agg_calls: Vec, pub row_count_index: usize, pub storages: Vec>, - pub result_table: StateTable, + pub intermediate_state_table: StateTable, pub distinct_dedup_tables: HashMap>, pub watermark_epoch: AtomicU64Ref, pub metrics: Arc, @@ -57,6 +57,7 @@ impl AggExecutorExtraArgs for SimpleAggExecutorExtraArgs {} pub struct HashAggExecutorExtraArgs { pub group_key_indices: Vec, pub chunk_size: usize, + pub max_dirty_groups_heap_size: usize, pub emit_on_window_close: bool, } impl AggExecutorExtraArgs for 
HashAggExecutorExtraArgs {} diff --git a/src/stream/src/executor/aggregation/agg_group.rs b/src/stream/src/executor/aggregation/agg_group.rs index fb93f600e1fb6..d854969120919 100644 --- a/src/stream/src/executor/aggregation/agg_group.rs +++ b/src/stream/src/executor/aggregation/agg_group.rs @@ -17,13 +17,14 @@ use std::marker::PhantomData; use std::sync::Arc; use risingwave_common::array::stream_record::{Record, RecordType}; -use risingwave_common::array::{StreamChunk, Vis}; +use risingwave_common::array::StreamChunk; +use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::Schema; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::must_match; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{AggCall, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggCall, BoxedAggregateFunction}; use risingwave_storage::StateStore; use super::agg_state::{AggState, AggStateStorage}; @@ -159,7 +160,7 @@ pub struct AggGroup { /// Current managed states for all [`AggCall`]s. states: Vec, - /// Previous outputs of managed states. Initializing with `None`. + /// Previous outputs of aggregate functions. Initializing with `None`. prev_outputs: Option, /// Index of row count agg call (`count(*)`) in the call list. 
@@ -195,17 +196,17 @@ impl AggGroup { agg_calls: &[AggCall], agg_funcs: &[BoxedAggregateFunction], storages: &[AggStateStorage], - result_table: &StateTable, + intermediate_state_table: &StateTable, pk_indices: &PkIndices, row_count_index: usize, extreme_cache_size: usize, input_schema: &Schema, ) -> StreamExecutorResult { - let prev_outputs: Option = result_table + let encoded_states = intermediate_state_table .get_row(group_key.as_ref().map(GroupKey::table_pk)) .await?; - if let Some(prev_outputs) = &prev_outputs { - assert_eq!(prev_outputs.len(), agg_calls.len()); + if let Some(encoded_states) = &encoded_states { + assert_eq!(encoded_states.len(), agg_calls.len()); } let mut states = Vec::with_capacity(agg_calls.len()); @@ -214,7 +215,50 @@ impl AggGroup { agg_call, agg_func, &storages[idx], - prev_outputs.as_ref().map(|outputs| &outputs[idx]), + encoded_states.as_ref().map(|outputs| &outputs[idx]), + pk_indices, + extreme_cache_size, + input_schema, + )?; + states.push(state); + } + + let mut this = Self { + group_key, + states, + prev_outputs: None, // will be initialized later + row_count_index, + _phantom: PhantomData, + }; + + if encoded_states.is_some() { + let (_, outputs) = this.get_outputs(storages, agg_funcs).await?; + this.prev_outputs = Some(outputs); + } + + Ok(this) + } + + /// Create a group from encoded states for EOWC. The previous output is set to `None`. 
+ #[allow(clippy::too_many_arguments)] + pub fn create_eowc( + group_key: Option, + agg_calls: &[AggCall], + agg_funcs: &[BoxedAggregateFunction], + storages: &[AggStateStorage], + encoded_states: &OwnedRow, + pk_indices: &PkIndices, + row_count_index: usize, + extreme_cache_size: usize, + input_schema: &Schema, + ) -> StreamExecutorResult { + let mut states = Vec::with_capacity(agg_calls.len()); + for (idx, (agg_call, agg_func)) in agg_calls.iter().zip_eq_fast(agg_funcs).enumerate() { + let state = AggState::create( + agg_call, + agg_func, + &storages[idx], + Some(&encoded_states[idx]), pk_indices, extreme_cache_size, input_schema, @@ -225,7 +269,7 @@ impl AggGroup { Ok(Self { group_key, states, - prev_outputs, + prev_outputs: None, row_count_index, _phantom: PhantomData, }) @@ -286,7 +330,7 @@ impl AggGroup { chunk: &StreamChunk, calls: &[AggCall], funcs: &[BoxedAggregateFunction], - visibilities: Vec, + visibilities: Vec, ) -> StreamExecutorResult<()> { if self.curr_row_count() == 0 { tracing::trace!(group = ?self.group_key_row(), "first time see this group"); @@ -314,6 +358,28 @@ impl AggGroup { } } + /// Encode intermediate states. + pub fn encode_states( + &self, + funcs: &[BoxedAggregateFunction], + ) -> StreamExecutorResult { + let mut encoded_states = Vec::with_capacity(self.states.len()); + for (state, func) in self.states.iter().zip_eq_fast(funcs) { + let encoded = match state { + AggState::Value(s) => func.encode_state(s)?, + // For minput state, we don't need to store it in state table. + AggState::MaterializedInput(_) => None, + }; + encoded_states.push(encoded); + } + let states = self + .group_key() + .map(GroupKey::table_row) + .chain(OwnedRow::new(encoded_states)) + .into_owned_row(); + Ok(states) + } + /// Get the outputs of all managed agg states, without group key prefix. /// Possibly need to read/sync from state table if the state not cached in memory. /// This method is idempotent, i.e. 
it can be called multiple times and the outputs are diff --git a/src/stream/src/executor/aggregation/agg_state.rs b/src/stream/src/executor/aggregation/agg_state.rs index 1bc4028eeac9d..0c1932c58831c 100644 --- a/src/stream/src/executor/aggregation/agg_state.rs +++ b/src/stream/src/executor/aggregation/agg_state.rs @@ -12,12 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_common::array::{StreamChunk, Vis}; +use risingwave_common::array::StreamChunk; +use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::Schema; use risingwave_common::estimate_size::EstimateSize; use risingwave_common::must_match; use risingwave_common::types::Datum; -use risingwave_expr::agg::{AggCall, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggCall, AggregateState, BoxedAggregateFunction}; use risingwave_storage::StateStore; use super::minput::MaterializedInputState; @@ -43,7 +44,7 @@ pub enum AggStateStorage { /// State for single aggregation call. It manages the state cache and interact with the /// underlying state store if necessary. pub enum AggState { - /// State as single scalar value. + /// State as a single scalar value. /// e.g. `count`, `sum`, append-only `min`/`max`. 
Value(AggregateState), @@ -67,15 +68,15 @@ impl AggState { agg_call: &AggCall, agg_func: &BoxedAggregateFunction, storage: &AggStateStorage, - prev_output: Option<&Datum>, + encoded_state: Option<&Datum>, pk_indices: &PkIndices, extreme_cache_size: usize, input_schema: &Schema, ) -> StreamExecutorResult { Ok(match storage { AggStateStorage::Value => { - let state = match prev_output { - Some(prev) => AggregateState::Datum(prev.clone()), + let state = match encoded_state { + Some(encoded) => agg_func.decode_state(encoded.clone())?, None => agg_func.create_state(), }; Self::Value(state) @@ -98,7 +99,7 @@ impl AggState { chunk: &StreamChunk, call: &AggCall, func: &BoxedAggregateFunction, - visibility: Vis, + visibility: Bitmap, ) -> StreamExecutorResult<()> { match self { Self::Value(state) => { @@ -108,7 +109,7 @@ impl AggState { } Self::MaterializedInput(state) => { // the input chunk for minput is unprojected - let chunk = chunk.with_visibility(visibility); + let chunk = chunk.clone_with_vis(visibility); state.apply_chunk(&chunk) } } diff --git a/src/stream/src/executor/aggregation/distinct.rs b/src/stream/src/executor/aggregation/distinct.rs index dd5905c342710..9e1d8d66da848 100644 --- a/src/stream/src/executor/aggregation/distinct.rs +++ b/src/stream/src/executor/aggregation/distinct.rs @@ -18,8 +18,8 @@ use std::sync::atomic::AtomicU64; use std::sync::Arc; use itertools::Itertools; -use risingwave_common::array::{ArrayRef, Op, Vis, VisRef}; -use risingwave_common::buffer::BitmapBuilder; +use risingwave_common::array::{ArrayRef, Op}; +use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::row::{self, CompactedRow, OwnedRow, Row, RowExt}; use risingwave_common::types::{ScalarImpl, ScalarRefImpl}; use risingwave_common::util::iter_util::ZipEqFast; @@ -55,7 +55,7 @@ impl ColumnDeduplicater { &mut self, ops: &[Op], column: &ArrayRef, - mut visibilities: Vec<&mut Vis>, + mut visibilities: Vec<&mut Bitmap>, dedup_table: &mut StateTable, 
group_key: Option<&GroupKey>, ctx: ActorContextRef, @@ -69,6 +69,7 @@ impl ColumnDeduplicater { .map(|_| BitmapBuilder::zeroed(column.len())) .collect_vec(); let actor_id_str = ctx.id.to_string(); + let fragment_id_str = ctx.fragment_id.to_string(); let table_id_str = dedup_table.table_id().to_string(); for (datum_idx, (op, datum)) in ops.iter().zip_eq_fast(column.iter()).enumerate() { // skip if this item is hidden to all agg calls (this is likely to happen) @@ -85,7 +86,7 @@ impl ColumnDeduplicater { self.metrics_info .metrics .agg_distinct_total_cache_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); // TODO(yuhao): avoid this `contains`. // https://github.com/risingwavelabs/risingwave/issues/9233 @@ -95,7 +96,7 @@ impl ColumnDeduplicater { self.metrics_info .metrics .agg_distinct_cache_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); // load from table into the cache let counts = if let Some(counts_row) = @@ -173,11 +174,8 @@ impl ColumnDeduplicater { }); for (vis, vis_mask_inv) in visibilities.iter_mut().zip_eq(vis_masks_inv.into_iter()) { - let mask = !vis_mask_inv.finish(); - if !mask.all() { - // update visibility if needed - **vis = vis.as_ref() & VisRef::from(&mask); - } + // update visibility + **vis &= !vis_mask_inv.finish(); } // if we determine to flush to the table when processing every chunk instead of barrier @@ -193,11 +191,12 @@ impl ColumnDeduplicater { // WARN: if you want to change to batching the write to table. please remember to change // `self.cache.evict()` too. 
let actor_id_str = ctx.id.to_string(); + let fragment_id_str = ctx.fragment_id.to_string(); let table_id_str = dedup_table.table_id().to_string(); self.metrics_info .metrics .agg_distinct_cached_entry_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .set(self.cache.len() as i64); self.cache.evict(); } @@ -259,11 +258,11 @@ impl DistinctDeduplicater { &mut self, ops: &[Op], columns: &[ArrayRef], - mut visibilities: Vec, + mut visibilities: Vec, dedup_tables: &mut HashMap>, group_key: Option<&GroupKey>, ctx: ActorContextRef, - ) -> StreamExecutorResult> { + ) -> StreamExecutorResult> { for (distinct_col, (ref call_indices, deduplicater)) in &mut self.deduplicaters { let column = &columns[*distinct_col]; let dedup_table = dedup_tables.get_mut(distinct_col).unwrap(); diff --git a/src/stream/src/executor/aggregation/minput.rs b/src/stream/src/executor/aggregation/minput.rs index 7d65e9f787938..1329f08eb6d99 100644 --- a/src/stream/src/executor/aggregation/minput.rs +++ b/src/stream/src/executor/aggregation/minput.rs @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::ops::Bound::{self}; + use futures::{pin_mut, StreamExt}; use futures_async_stream::for_await; use itertools::Itertools; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; use risingwave_common::estimate_size::EstimateSize; -use risingwave_common::row::RowExt; +use risingwave_common::row::{OwnedRow, RowExt}; use risingwave_common::types::Datum; use risingwave_common::util::row_serde::OrderedRowSerde; use risingwave_common::util::sort_util::OrderType; -use risingwave_expr::agg::{AggCall, AggKind, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{AggCall, AggKind, BoxedAggregateFunction}; use risingwave_storage::store::PrefetchOptions; use risingwave_storage::StateStore; @@ -137,7 +139,10 @@ impl MaterializedInputState { agg_call.args.arg_types(), )) } - AggKind::StringAgg | AggKind::ArrayAgg => Box::new(GenericAggStateCache::new( + AggKind::StringAgg + | AggKind::ArrayAgg + | AggKind::JsonbAgg + | AggKind::JsonbObjectAgg => Box::new(GenericAggStateCache::new( OrderedStateCache::new(), agg_call.args.arg_types(), )), @@ -182,10 +187,12 @@ impl MaterializedInputState { ) -> StreamExecutorResult { if !self.cache.is_synced() { let mut cache_filler = self.cache.begin_syncing(); - + let sub_range: &(Bound, Bound) = + &(Bound::Unbounded, Bound::Unbounded); let all_data_iter = state_table - .iter_row_with_pk_prefix( + .iter_with_prefix( group_key.map(GroupKey::table_pk), + sub_range, PrefetchOptions { exhaust_iter: cache_filler.capacity().is_none(), }, @@ -247,7 +254,7 @@ mod tests { use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::sort_util::OrderType; - use risingwave_expr::agg::{build, AggCall}; + use risingwave_expr::aggregate::{build_append_only, AggCall}; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::StateStore; @@ -306,7 +313,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2, field3, field4]); 
let agg_call = AggCall::from_pretty("(min:int4 $2:int4)"); // min(c) - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let (mut table, mapping) = create_mem_state_table( @@ -399,7 +406,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2, field3, field4]); let agg_call = AggCall::from_pretty("(max:int4 $2:int4)"); // max(c) - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let (mut table, mapping) = create_mem_state_table( @@ -494,8 +501,8 @@ mod tests { let agg_call_1 = AggCall::from_pretty("(min:varchar $0:varchar)"); // min(a) let agg_call_2 = AggCall::from_pretty("(max:int4 $1:int4)"); // max(b) - let agg1 = build(&agg_call_1).unwrap(); - let agg2 = build(&agg_call_2).unwrap(); + let agg1 = build_append_only(&agg_call_1).unwrap(); + let agg2 = build_append_only(&agg_call_2).unwrap(); let group_key = None; let (mut table_1, mapping_1) = create_mem_state_table( @@ -599,7 +606,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2, field3, field4]); let agg_call = AggCall::from_pretty("(max:int4 $1:int4)"); // max(b) - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = Some(GroupKey::new(OwnedRow::new(vec![Some(8.into())]), None)); let (mut table, mapping) = create_mem_state_table( @@ -691,7 +698,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2]); let agg_call = AggCall::from_pretty("(min:int4 $0:int4)"); // min(a) - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let (mut table, mapping) = create_mem_state_table( @@ -793,7 +800,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2]); let agg_call = AggCall::from_pretty("(min:int4 $0:int4)"); // min(a) - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let 
(mut table, mapping) = create_mem_state_table( @@ -899,7 +906,7 @@ mod tests { let agg_call = AggCall::from_pretty( "(string_agg:varchar $0:varchar $1:varchar orderby $2:asc $0:desc)", ); - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let (mut table, mapping) = create_mem_state_table( @@ -978,7 +985,7 @@ mod tests { let input_schema = Schema::new(vec![field1, field2, field3, field4]); let agg_call = AggCall::from_pretty("(array_agg:int4[] $1:int4 orderby $2:asc $0:desc)"); - let agg = build(&agg_call).unwrap(); + let agg = build_append_only(&agg_call).unwrap(); let group_key = None; let (mut table, mapping) = create_mem_state_table( diff --git a/src/stream/src/executor/aggregation/mod.rs b/src/stream/src/executor/aggregation/mod.rs index 4505ce5520981..9bb1113152962 100644 --- a/src/stream/src/executor/aggregation/mod.rs +++ b/src/stream/src/executor/aggregation/mod.rs @@ -16,13 +16,14 @@ pub use agg_group::*; pub use agg_state::*; pub use distinct::*; use risingwave_common::array::ArrayImpl::Bool; -use risingwave_common::array::{DataChunk, Vis}; +use risingwave_common::array::DataChunk; use risingwave_common::bail; +use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::{Field, Schema}; -use risingwave_expr::agg::{AggCall, AggKind}; +use risingwave_expr::aggregate::{AggCall, AggKind}; +use risingwave_expr::expr::{LogReport, NonStrictExpression}; use risingwave_storage::StateStore; -use super::ActorContextRef; use crate::common::table::state_table::StateTable; use crate::executor::error::StreamExecutorResult; use crate::executor::Executor; @@ -59,12 +60,10 @@ pub fn generate_agg_schema( } pub async fn agg_call_filter_res( - ctx: &ActorContextRef, - identity: &str, agg_call: &AggCall, chunk: &DataChunk, -) -> StreamExecutorResult { - let mut vis = chunk.vis().clone(); +) -> StreamExecutorResult { + let mut vis = chunk.visibility().clone(); if matches!( agg_call.kind, AggKind::Min | 
AggKind::Max | AggKind::StringAgg @@ -76,8 +75,9 @@ pub async fn agg_call_filter_res( } if let Some(ref filter) = agg_call.filter { - if let Bool(filter_res) = filter - .eval_infallible(chunk, |err| ctx.on_compute_error(err, identity)) + // TODO: should we build `filter` in non-strict mode? + if let Bool(filter_res) = NonStrictExpression::new_topmost(&**filter, LogReport) + .eval_infallible(chunk) .await .as_ref() { diff --git a/src/stream/src/executor/backfill/arrangement_backfill.rs b/src/stream/src/executor/backfill/arrangement_backfill.rs index d33aed6d6c441..ae5e8696de6c3 100644 --- a/src/stream/src/executor/backfill/arrangement_backfill.rs +++ b/src/stream/src/executor/backfill/arrangement_backfill.rs @@ -473,7 +473,7 @@ where &mut temporary_state, ).await?; - self.progress.finish(barrier.epoch.curr); + self.progress.finish(barrier.epoch.curr, total_snapshot_processed_rows); yield msg; break; } @@ -548,7 +548,7 @@ where let range_bounds = range_bounds.unwrap(); let vnode_row_iter = upstream_table - .iter_row_with_pk_range(&range_bounds, vnode, Default::default()) + .iter_with_vnode(vnode, &range_bounds, Default::default()) .await?; // TODO: Is there some way to avoid double-pin here? 
diff --git a/src/stream/src/executor/backfill/cdc_backfill.rs b/src/stream/src/executor/backfill/cdc_backfill.rs index 2f522ae8eeb0c..c17aad1d2d62d 100644 --- a/src/stream/src/executor/backfill/cdc_backfill.rs +++ b/src/stream/src/executor/backfill/cdc_backfill.rs @@ -48,7 +48,7 @@ use crate::executor::{ }; use crate::task::{ActorId, CreateMviewProgress}; -const BACKFILL_STATE_KEY_SUFFIX: &str = "_backfill"; +pub const BACKFILL_STATE_KEY_SUFFIX: &str = "_backfill"; pub struct CdcBackfillExecutor { actor_ctx: ActorContextRef, @@ -227,7 +227,9 @@ impl CdcBackfillExecutor { #[allow(unused_variables)] let mut total_snapshot_processed_rows: u64 = 0; - let mut last_binlog_offset: Option; + // Read the current binlog offset as a low watermark + let mut last_binlog_offset: Option = + upstream_table_reader.current_binlog_offset().await?; let mut consumed_binlog_offset: Option = None; @@ -251,7 +253,6 @@ impl CdcBackfillExecutor { // // Once the backfill loop ends, we forward the upstream directly to the downstream. 
if to_backfill { - last_binlog_offset = upstream_table_reader.current_binlog_offset().await?; // drive the upstream changelog first to ensure we can receive timely changelog event, // otherwise the upstream changelog may be blocked by the snapshot read stream let _ = Pin::new(&mut upstream).peek().await; @@ -348,6 +349,11 @@ impl CdcBackfillExecutor { break; } Message::Chunk(chunk) => { + // skip empty upstream chunk + if chunk.cardinality() == 0 { + continue; + } + let chunk_binlog_offset = get_cdc_chunk_last_offset( upstream_table_reader.inner().table_reader(), &chunk, @@ -441,18 +447,23 @@ impl CdcBackfillExecutor { } } } - } else { + } else if is_snapshot_empty { + tracing::info!( + upstream_table_id, + initial_binlog_offset = ?last_binlog_offset, + "upstream snapshot is empty, mark backfill is done and persist current binlog offset"); + Self::write_backfill_state( &mut self.source_state_handler, upstream_table_id, &split_id, &mut cdc_split, - None, + last_binlog_offset, ) .await?; } - tracing::debug!( + tracing::info!( actor = self.actor_id, "CdcBackfill has already finished and forward messages directly to the downstream" ); @@ -485,6 +496,11 @@ impl CdcBackfillExecutor { cdc_split: &mut Option, last_binlog_offset: Option, ) -> StreamExecutorResult<()> { + assert!( + last_binlog_offset.is_some(), + "last binlog offset cannot be None" + ); + if let Some(split_id) = split_id.as_ref() { let mut key = split_id.to_string(); key.push_str(BACKFILL_STATE_KEY_SUFFIX); @@ -517,6 +533,9 @@ impl CdcBackfillExecutor { "server".to_string() => server }, source_offset, + // upstream heartbeat event would not emit to the cdc backfill executor, + // since we don't parse heartbeat event in the source parser. 
+ is_heartbeat: false, } }); diff --git a/src/stream/src/executor/backfill/no_shuffle_backfill.rs b/src/stream/src/executor/backfill/no_shuffle_backfill.rs index 6040a6745f9e3..97a9da0ff6a99 100644 --- a/src/stream/src/executor/backfill/no_shuffle_backfill.rs +++ b/src/stream/src/executor/backfill/no_shuffle_backfill.rs @@ -17,11 +17,12 @@ use std::sync::Arc; use either::Either; use futures::stream::select_with_strategy; -use futures::{pin_mut, stream, StreamExt, TryStreamExt}; +use futures::{pin_mut, stream, StreamExt}; use futures_async_stream::try_stream; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::catalog::Schema; -use risingwave_common::row::OwnedRow; +use risingwave_common::hash::VnodeBitmapExt; +use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::Datum; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; use risingwave_common::util::epoch::EpochPair; @@ -34,8 +35,8 @@ use risingwave_storage::StateStore; use crate::common::table::state_table::StateTable; use crate::executor::backfill::utils; use crate::executor::backfill::utils::{ - check_all_vnode_finished, compute_bounds, construct_initial_finished_state, get_new_pos, - iter_chunks, mapping_chunk, mapping_message, mark_chunk, owned_row_iter, + compute_bounds, construct_initial_finished_state, get_new_pos, iter_chunks, mapping_chunk, + mapping_message, mark_chunk, owned_row_iter, }; use crate::executor::monitor::StreamingMetrics; use crate::executor::{ @@ -44,6 +45,19 @@ use crate::executor::{ }; use crate::task::{ActorId, CreateMviewProgress}; +/// vnode, `is_finished`, `row_count`, all occupy 1 column each. +const METADATA_STATE_LEN: usize = 3; + +/// Schema: | vnode | pk ... | `backfill_finished` | `row_count` | +/// We can decode that into `BackfillState` on recovery. 
+#[derive(Debug, Eq, PartialEq)] +pub struct BackfillState { + current_pos: Option, + old_state: Option>, + is_finished: bool, + row_count: u64, +} + /// An implementation of the [RFC: Use Backfill To Let Mv On Mv Stream Again](https://github.com/risingwavelabs/rfcs/pull/13). /// `BackfillExecutor` is used to create a materialized view on another materialized view. /// @@ -77,6 +91,7 @@ pub struct BackfillExecutor { /// The column indices need to be forwarded to the downstream from the upstream and table scan. output_indices: Vec, + /// PTAL at the docstring for `CreateMviewProgress` to understand how we compute it. progress: CreateMviewProgress, actor_id: ActorId, @@ -103,12 +118,13 @@ where pk_indices: PkIndices, metrics: Arc, chunk_size: usize, + executor_id: u64, ) -> Self { Self { info: ExecutorInfo { schema, pk_indices, - identity: "BackfillExecutor".to_owned(), + identity: format!("BackfillExecutor {:X}", executor_id), }, upstream_table, upstream, @@ -125,7 +141,8 @@ where async fn execute_inner(mut self) { // The primary key columns, in the output columns of the upstream_table scan. let pk_in_output_indices = self.upstream_table.pk_in_output_indices().unwrap(); - let state_len = pk_in_output_indices.len() + 2; // +1 for backfill_finished, +1 for vnode key. 
+ + let state_len = pk_in_output_indices.len() + METADATA_STATE_LEN; let pk_order = self.upstream_table.pk_serializer().get_order_types(); @@ -140,63 +157,20 @@ where state_table.init_epoch(first_barrier.epoch); } - let is_finished = if let Some(state_table) = self.state_table.as_mut() { - let is_finished = check_all_vnode_finished(state_table).await?; - if is_finished { - assert!(!first_barrier.is_newly_added(self.actor_id)); - } - is_finished - } else { - // Maintain backwards compatibility with no state table - !first_barrier.is_newly_added(self.actor_id) - }; + let BackfillState { + mut current_pos, + is_finished, + row_count, + mut old_state, + } = Self::recover_backfill_state(self.state_table.as_ref(), pk_in_output_indices.len()) + .await?; let mut builder = DataChunkBuilder::new(self.upstream_table.schema().data_types(), self.chunk_size); - // If the snapshot is empty, we don't need to backfill. - // We cannot complete progress now, as we want to persist - // finished state to state store first. - // As such we will wait for next barrier. - let is_snapshot_empty: bool = { - if is_finished { - // It is finished, so just assign a value to avoid accessing storage table again. - false - } else { - let snapshot_is_empty = { - let snapshot = Self::snapshot_read( - &self.upstream_table, - init_epoch, - None, - false, - &mut builder, - ); - pin_mut!(snapshot); - snapshot.try_next().await?.unwrap().is_none() - }; - let snapshot_buffer_is_empty = builder.is_empty(); - builder.clear(); - snapshot_is_empty && snapshot_buffer_is_empty - } - }; - - // | backfill_is_finished | snapshot_empty | need_to_backfill | - // | t | t/f | f | - // | f | t | f | - // | f | f | t | - let to_backfill = !is_finished && !is_snapshot_empty; - - // Current position of the upstream_table storage primary key. - // `None` means it starts from the beginning. - let mut current_pos: Option = None; - - // Use these to persist state. 
- // They contain the backfill position, - // as well as the progress. - // However, they do not contain the vnode key at index 0. - // That is filled in when we flush the state table. + // Use this buffer to construct state, + // which will then be persisted. let mut current_state: Vec = vec![None; state_len]; - let mut old_state: Option> = None; // The first barrier message should be propagated. yield Message::Barrier(first_barrier); @@ -215,7 +189,7 @@ where let mut snapshot_read_epoch = init_epoch; // Keep track of rows from the snapshot. - let mut total_snapshot_processed_rows: u64 = 0; + let mut total_snapshot_processed_rows: u64 = row_count; // Backfill Algorithm: // @@ -240,12 +214,14 @@ where // finished. // // Once the backfill loop ends, we forward the upstream directly to the downstream. - if to_backfill { + if !is_finished { let mut upstream_chunk_buffer: Vec = vec![]; let mut pending_barrier: Option = None; 'backfill_loop: loop { let mut cur_barrier_snapshot_processed_rows: u64 = 0; let mut cur_barrier_upstream_processed_rows: u64 = 0; + let mut snapshot_read_complete = false; + let mut has_snapshot_read = false; // We should not buffer rows from previous epoch, else we can have duplicates. assert!(upstream_chunk_buffer.is_empty()); @@ -264,13 +240,13 @@ where // Prefer to select upstream, so we can stop snapshot stream as soon as the // barrier comes. - let backfill_stream = + let mut backfill_stream = select_with_strategy(left_upstream, right_snapshot, |_: &mut ()| { stream::PollNext::Left }); #[for_await] - for either in backfill_stream { + for either in &mut backfill_stream { match either { // Upstream Either::Left(msg) => { @@ -297,6 +273,7 @@ where } // Snapshot read Either::Right(msg) => { + has_snapshot_read = true; match msg? { None => { // End of the snapshot read stream. @@ -336,6 +313,43 @@ where } } } + + // Before processing barrier, if did not snapshot read, + // do a snapshot read first. 
+ // This is so we don't lose the tombstone iteration progress. + if !has_snapshot_read { + let (_, snapshot) = backfill_stream.into_inner(); + #[for_await] + for msg in snapshot { + let Either::Right(msg) = msg else { + bail!("BUG: snapshot_read contains upstream messages"); + }; + match msg? { + None => { + // End of the snapshot read stream. + // We let the barrier handling logic take care of upstream updates. + // But we still want to exit backfill loop, so we mark snapshot read complete. + snapshot_read_complete = true; + break; + } + Some(chunk) => { + // Raise the current position. + // As snapshot read streams are ordered by pk, so we can + // just use the last row to update `current_pos`. + current_pos = Some(get_new_pos(&chunk, &pk_in_output_indices)); + + let chunk_cardinality = chunk.cardinality() as u64; + cur_barrier_snapshot_processed_rows += chunk_cardinality; + total_snapshot_processed_rows += chunk_cardinality; + yield Message::Chunk(mapping_chunk( + chunk, + &self.output_indices, + )); + break; + } + } + } + } } // When we break out of inner backfill_stream loop, it means we have a barrier. // If there are no updates and there are no snapshots left, @@ -410,54 +424,83 @@ where &mut self.state_table, false, ¤t_pos, + total_snapshot_processed_rows, &mut old_state, &mut current_state, ) .await?; + tracing::trace!( + epoch = ?barrier.epoch, + ?current_pos, + total_snapshot_processed_rows, + "Backfill state persisted" + ); + yield Message::Barrier(barrier); + if snapshot_read_complete { + break 'backfill_loop; + } + // We will switch snapshot at the start of the next iteration of the backfill loop. } } - tracing::trace!( - actor = self.actor_id, - "Backfill has already finished and forward messages directly to the downstream" - ); + tracing::trace!("Backfill has finished, waiting for barrier"); // Wait for first barrier to come after backfill is finished. // So we can update our progress + persist the status. 
while let Some(Ok(msg)) = upstream.next().await { if let Some(msg) = mapping_message(msg, &self.output_indices) { // If not finished then we need to update state, otherwise no need. - if let Message::Barrier(barrier) = &msg && !is_finished { - // If snapshot was empty, we do not need to backfill, - // but we still need to persist the finished state. - // We currently persist it on the second barrier here rather than first. - // This is because we can't update state table in first epoch, - // since it expects to have been initialized in previous epoch - // (there's no epoch before the first epoch). - if is_snapshot_empty { - current_pos = - Some(construct_initial_finished_state(pk_in_output_indices.len())) - } + if let Message::Barrier(barrier) = &msg { + if is_finished { + // If already finished, no need persist any state. + } else { + // If snapshot was empty, we do not need to backfill, + // but we still need to persist the finished state. + // We currently persist it on the second barrier here rather than first. + // This is because we can't update state table in first epoch, + // since it expects to have been initialized in previous epoch + // (there's no epoch before the first epoch). + if current_pos.is_none() { + current_pos = + Some(construct_initial_finished_state(pk_in_output_indices.len())) + } - // We will update current_pos at least once, - // since snapshot read has to be non-empty, - // Or snapshot was empty and we construct a placeholder state. - debug_assert_ne!(current_pos, None); + // We will update current_pos at least once, + // since snapshot read has to be non-empty, + // Or snapshot was empty and we construct a placeholder state. 
+ debug_assert_ne!(current_pos, None); + + Self::persist_state( + barrier.epoch, + &mut self.state_table, + true, + ¤t_pos, + total_snapshot_processed_rows, + &mut old_state, + &mut current_state, + ) + .await?; + tracing::trace!( + epoch = ?barrier.epoch, + ?current_pos, + total_snapshot_processed_rows, + "Backfill position persisted after completion" + ); + } - Self::persist_state( - barrier.epoch, - &mut self.state_table, - true, - ¤t_pos, - &mut old_state, - &mut current_state, - ) - .await?; - self.progress.finish(barrier.epoch.curr); + // For both backfill finished before recovery, + // and backfill which just finished, we need to update mview tracker, + // it does not persist this information. + self.progress + .finish(barrier.epoch.curr, total_snapshot_processed_rows); + tracing::trace!( + epoch = ?barrier.epoch, + "Updated CreateMaterializedTracker" + ); yield msg; break; } @@ -465,20 +508,80 @@ where } } + tracing::trace!( + "Backfill has already finished and forward messages directly to the downstream" + ); + // After progress finished + state persisted, // we can forward messages directly to the downstream, // as backfill is finished. + // We don't need to report backfill progress any longer, as it has finished. + // It will always be at 100%. #[for_await] for msg in upstream { if let Some(msg) = mapping_message(msg?, &self.output_indices) { - if let Some(state_table) = self.state_table.as_mut() && let Message::Barrier(barrier) = &msg { - state_table.commit_no_data_expected(barrier.epoch); - } yield msg; } } } + async fn recover_backfill_state( + state_table: Option<&StateTable>, + pk_len: usize, + ) -> StreamExecutorResult { + let Some(state_table) = state_table else { + // If no state table, but backfill is present, it must be from an old cluster. + // In that case backfill must be finished, otherwise it won't have been persisted. 
+ return Ok(BackfillState { + current_pos: None, + is_finished: true, + row_count: 0, + old_state: None, + }); + }; + let mut vnodes = state_table.vnodes().iter_vnodes_scalar(); + let first_vnode = vnodes.next().unwrap(); + let key: &[Datum] = &[Some(first_vnode.into())]; + let row = state_table.get_row(key).await?; + let expected_state = Self::deserialize_backfill_state(row, pk_len); + + // All vnode partitions should have same state (no scale-in supported). + for vnode in vnodes { + let key: &[Datum] = &[Some(vnode.into())]; + let row = state_table.get_row(key).await?; + let state = Self::deserialize_backfill_state(row, pk_len); + assert_eq!(state.is_finished, expected_state.is_finished); + } + Ok(expected_state) + } + + fn deserialize_backfill_state(row: Option, pk_len: usize) -> BackfillState { + let Some(row) = row else { + return BackfillState { + current_pos: None, + is_finished: false, + row_count: 0, + old_state: None, + }; + }; + let row = row.into_inner(); + let mut old_state = vec![None; pk_len + METADATA_STATE_LEN]; + old_state[1..row.len() + 1].clone_from_slice(&row); + let current_pos = Some((&row[0..pk_len]).into_owned_row()); + let is_finished = row[pk_len].clone().map_or(false, |d| d.into_bool()); + let row_count = row + .get(pk_len + 1) + .cloned() + .unwrap_or(None) + .map_or(0, |d| d.into_int64() as u64); + BackfillState { + current_pos, + is_finished, + row_count, + old_state: Some(old_state), + } + } + /// Snapshot read the upstream mv. /// The rows from upstream snapshot read will be buffered inside the `builder`. 
/// If snapshot is dropped before its rows are consumed, @@ -528,6 +631,7 @@ where table: &mut Option>, is_finished: bool, current_pos: &Option, + row_count: u64, old_state: &mut Option>, current_state: &mut [Datum], ) -> StreamExecutorResult<()> { @@ -538,6 +642,7 @@ where table, is_finished, current_pos, + row_count, old_state, current_state, ) diff --git a/src/stream/src/executor/backfill/upstream_table/snapshot.rs b/src/stream/src/executor/backfill/upstream_table/snapshot.rs index 806d78700154e..0e17ba7e722c4 100644 --- a/src/stream/src/executor/backfill/upstream_table/snapshot.rs +++ b/src/stream/src/executor/backfill/upstream_table/snapshot.rs @@ -24,21 +24,17 @@ use risingwave_connector::source::external::{CdcOffset, ExternalTableReader}; use crate::executor::backfill::upstream_table::external::ExternalStorageTable; use crate::executor::backfill::utils::iter_chunks; -use crate::executor::{StreamExecutorResult, INVALID_EPOCH}; +use crate::executor::{StreamExecutorError, StreamExecutorResult, INVALID_EPOCH}; pub trait UpstreamTableRead { - type BinlogOffsetFuture<'a>: Future>> - + Send - + 'a - where - Self: 'a; - type SnapshotStream<'a>: Stream>> + Send + 'a - where - Self: 'a; - - fn snapshot_read(&self, args: SnapshotReadArgs) -> Self::SnapshotStream<'_>; - - fn current_binlog_offset(&self) -> Self::BinlogOffsetFuture<'_>; + fn snapshot_read( + &self, + args: SnapshotReadArgs, + ) -> impl Stream>> + Send + '_; + + fn current_binlog_offset( + &self, + ) -> impl Future>> + Send + '_; } #[derive(Debug, Default)] @@ -92,52 +88,43 @@ impl UpstreamTableReader { } impl UpstreamTableRead for UpstreamTableReader { - type BinlogOffsetFuture<'a> = - impl Future>> + 'a; - type SnapshotStream<'a> = impl Stream>> + 'a; - - fn snapshot_read(&self, args: SnapshotReadArgs) -> Self::SnapshotStream<'_> { - #[try_stream] - async move { - let primary_keys = self - .inner - .pk_indices() - .iter() - .map(|idx| { - let f = &self.inner.schema().fields[*idx]; - f.name.clone() - }) 
- .collect_vec(); - - tracing::debug!( - "snapshot_read primary keys: {:?}, current_pos: {:?}", - primary_keys, - args.current_pos - ); - - let row_stream = self.inner.table_reader().snapshot_read( - self.inner.schema_table_name(), - args.current_pos, - primary_keys, - ); - - pin_mut!(row_stream); - - let mut builder = - DataChunkBuilder::new(self.inner.schema().data_types(), args.chunk_size); - let chunk_stream = iter_chunks(row_stream, &mut builder); - #[for_await] - for chunk in chunk_stream { - yield chunk?; - } + #[try_stream(ok = Option, error = StreamExecutorError)] + async fn snapshot_read(&self, args: SnapshotReadArgs) { + let primary_keys = self + .inner + .pk_indices() + .iter() + .map(|idx| { + let f = &self.inner.schema().fields[*idx]; + f.name.clone() + }) + .collect_vec(); + + tracing::debug!( + "snapshot_read primary keys: {:?}, current_pos: {:?}", + primary_keys, + args.current_pos + ); + + let row_stream = self.inner.table_reader().snapshot_read( + self.inner.schema_table_name(), + args.current_pos, + primary_keys, + ); + + pin_mut!(row_stream); + + let mut builder = DataChunkBuilder::new(self.inner.schema().data_types(), args.chunk_size); + let chunk_stream = iter_chunks(row_stream, &mut builder); + #[for_await] + for chunk in chunk_stream { + yield chunk?; } } - fn current_binlog_offset(&self) -> Self::BinlogOffsetFuture<'_> { - async move { - let binlog = self.inner.table_reader().current_cdc_offset(); - let binlog = binlog.await?; - Ok(Some(binlog)) - } + async fn current_binlog_offset(&self) -> StreamExecutorResult> { + let binlog = self.inner.table_reader().current_cdc_offset(); + let binlog = binlog.await?; + Ok(Some(binlog)) } } diff --git a/src/stream/src/executor/backfill/utils.rs b/src/stream/src/executor/backfill/utils.rs index 6c49be9e607a1..663f9be94cf5e 100644 --- a/src/stream/src/executor/backfill/utils.rs +++ b/src/stream/src/executor/backfill/utils.rs @@ -170,10 +170,10 @@ pub(crate) fn mark_chunk_ref_by_vnode( 
new_visibility.append(v); } let (columns, _) = data.into_parts(); - Ok(StreamChunk::new( + Ok(StreamChunk::with_visibility( ops, columns, - Some(new_visibility.finish()), + new_visibility.finish(), )) } @@ -201,7 +201,7 @@ fn mark_chunk_inner( new_visibility.append(v); } let (columns, _) = data.into_parts(); - StreamChunk::new(ops, columns, Some(new_visibility.finish())) + StreamChunk::with_visibility(ops, columns, new_visibility.finish()) } fn mark_cdc_chunk_inner( @@ -246,10 +246,10 @@ fn mark_cdc_chunk_inner( } let (columns, _) = data.into_parts(); - Ok(StreamChunk::new( + Ok(StreamChunk::with_visibility( ops, columns, - Some(new_visibility.finish()), + new_visibility.finish(), )) } @@ -257,7 +257,7 @@ fn mark_cdc_chunk_inner( pub(crate) fn mapping_chunk(chunk: StreamChunk, output_indices: &[usize]) -> StreamChunk { let (ops, columns, visibility) = chunk.into_inner(); let mapped_columns = output_indices.iter().map(|&i| columns[i].clone()).collect(); - StreamChunk::new(ops, mapped_columns, visibility.into_visibility()) + StreamChunk::with_visibility(ops, mapped_columns, visibility) } fn mapping_watermark(watermark: Watermark, upstream_indices: &[usize]) -> Option { @@ -308,36 +308,7 @@ pub(crate) async fn get_progress_per_vnode( - state_table: &StateTableInner, -) -> StreamExecutorResult { - debug_assert!(!state_table.vnode_bitmap().is_empty()); - let vnodes = state_table.vnodes().iter_vnodes_scalar(); - let mut is_finished = true; - for vnode in vnodes { - let key: &[Datum] = &[Some(vnode.into())]; - let row = state_table.get_row(key).await?; - - let vnode_is_finished = if let Some(row) = row - && let Some(vnode_is_finished) = row.last() - { - vnode_is_finished.into_bool() - } else { - false - }; - if !vnode_is_finished { - is_finished = false; - break; - } - } - Ok(is_finished) -} - /// Flush the data -// This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. -// TODO: remove `allow` here after the issued is closed. 
-#[expect(clippy::needless_pass_by_ref_mut)] pub(crate) async fn flush_data( table: &mut StateTableInner, epoch: EpochPair, @@ -381,21 +352,26 @@ pub(crate) fn build_temporary_state_with_vnode( is_finished: bool, current_pos: &OwnedRow, ) { - build_temporary_state(row_state, is_finished, current_pos); + row_state[1..current_pos.len() + 1].clone_from_slice(current_pos.as_inner()); + row_state[current_pos.len() + 1] = Some(is_finished.into()); row_state[0] = Some(vnode.to_scalar().into()); } /// We want to avoid allocating a row for every vnode. /// Instead we can just modify a single row, and dispatch it to state table to write. -/// This builds the `current_pos` segment of the row. -/// Vnode needs to be filled in as well. +/// This builds the following segments of the row: +/// 1. `current_pos` +/// 2. `backfill_finished` +/// 3. `row_count` pub(crate) fn build_temporary_state( row_state: &mut [Datum], is_finished: bool, current_pos: &OwnedRow, + row_count: u64, ) { row_state[1..current_pos.len() + 1].clone_from_slice(current_pos.as_inner()); row_state[current_pos.len() + 1] = Some(is_finished.into()); + row_state[current_pos.len() + 2] = Some((row_count as i64).into()); } /// Update backfill pos by vnode. @@ -560,12 +536,13 @@ pub(crate) async fn persist_state( table: &mut StateTableInner, is_finished: bool, current_pos: &Option, + row_count: u64, old_state: &mut Option>, current_state: &mut [Datum], ) -> StreamExecutorResult<()> { if let Some(current_pos_inner) = current_pos { // state w/o vnodes. 
- build_temporary_state(current_state, is_finished, current_pos_inner); + build_temporary_state(current_state, is_finished, current_pos_inner, row_count); flush_data(table, epoch, old_state, current_state).await?; *old_state = Some(current_state.into()); } else { diff --git a/src/stream/src/executor/chain.rs b/src/stream/src/executor/chain.rs index ab3ef9ae44973..a51c9e95abbb1 100644 --- a/src/stream/src/executor/chain.rs +++ b/src/stream/src/executor/chain.rs @@ -79,7 +79,7 @@ impl ChainExecutor { // If the barrier is a conf change of creating this mview, and the snapshot is not to be // consumed, we can finish the progress immediately. if barrier.is_newly_added(self.actor_id) && self.upstream_only { - self.progress.finish(barrier.epoch.curr); + self.progress.finish(barrier.epoch.curr, 0); } // The first barrier message should be propagated. @@ -103,7 +103,7 @@ impl ChainExecutor { for msg in upstream { let msg = msg?; if to_consume_snapshot && let Message::Barrier(barrier) = &msg { - self.progress.finish(barrier.epoch.curr); + self.progress.finish(barrier.epoch.curr, 0); } yield msg; } diff --git a/src/stream/src/executor/dedup/append_only_dedup.rs b/src/stream/src/executor/dedup/append_only_dedup.rs index 890b1e4ac997d..898f78290bbb0 100644 --- a/src/stream/src/executor/dedup/append_only_dedup.rs +++ b/src/stream/src/executor/dedup/append_only_dedup.rs @@ -130,7 +130,7 @@ impl AppendOnlyDedupExecutor { if vis.count_ones() > 0 { // Construct the new chunk and write the data to state table. 
let (ops, columns, _) = chunk.into_inner(); - let chunk = StreamChunk::new(ops, columns, Some(vis)); + let chunk = StreamChunk::with_visibility(ops, columns, vis); self.state_table.write_chunk(chunk.clone()); commit_data = true; diff --git a/src/stream/src/executor/dispatch.rs b/src/stream/src/executor/dispatch.rs index 17b8866543c6f..414721c34efbf 100644 --- a/src/stream/src/executor/dispatch.rs +++ b/src/stream/src/executor/dispatch.rs @@ -51,6 +51,7 @@ struct DispatchExecutorInner { dispatchers: Vec, actor_id: u32, actor_id_str: String, + fragment_id_str: String, context: Arc, metrics: Arc, } @@ -66,24 +67,50 @@ impl DispatchExecutorInner { } async fn dispatch(&mut self, msg: Message) -> StreamResult<()> { - let start_time = Instant::now(); match msg { Message::Watermark(watermark) => { for dispatcher in &mut self.dispatchers { + let start_time = Instant::now(); dispatcher.dispatch_watermark(watermark.clone()).await?; + self.metrics + .actor_output_buffer_blocking_duration_ns + .with_label_values(&[ + &self.actor_id_str, + &self.fragment_id_str, + dispatcher.dispatcher_id_str(), + ]) + .inc_by(start_time.elapsed().as_nanos() as u64); } } Message::Chunk(chunk) => { self.metrics .actor_out_record_cnt - .with_label_values(&[&self.actor_id_str]) + .with_label_values(&[&self.actor_id_str, &self.fragment_id_str]) .inc_by(chunk.cardinality() as _); if self.dispatchers.len() == 1 { // special clone optimization when there is only one downstream dispatcher + let start_time = Instant::now(); self.single_inner_mut().dispatch_data(chunk).await?; + self.metrics + .actor_output_buffer_blocking_duration_ns + .with_label_values(&[ + &self.actor_id_str, + &self.fragment_id_str, + self.dispatchers[0].dispatcher_id_str(), + ]) + .inc_by(start_time.elapsed().as_nanos() as u64); } else { for dispatcher in &mut self.dispatchers { + let start_time = Instant::now(); dispatcher.dispatch_data(chunk.clone()).await?; + self.metrics + .actor_output_buffer_blocking_duration_ns + 
.with_label_values(&[ + &self.actor_id_str, + &self.fragment_id_str, + dispatcher.dispatcher_id_str(), + ]) + .inc_by(start_time.elapsed().as_nanos() as u64); } } } @@ -91,15 +118,20 @@ impl DispatchExecutorInner { let mutation = barrier.mutation.clone(); self.pre_mutate_dispatchers(&mutation)?; for dispatcher in &mut self.dispatchers { + let start_time = Instant::now(); dispatcher.dispatch_barrier(barrier.clone()).await?; + self.metrics + .actor_output_buffer_blocking_duration_ns + .with_label_values(&[ + &self.actor_id_str, + &self.fragment_id_str, + dispatcher.dispatcher_id_str(), + ]) + .inc_by(start_time.elapsed().as_nanos() as u64); } self.post_mutate_dispatchers(&mutation)?; } }; - self.metrics - .actor_output_buffer_blocking_duration_ns - .with_label_values(&[&self.actor_id_str]) - .inc_by(start_time.elapsed().as_nanos() as u64); Ok(()) } @@ -253,6 +285,7 @@ impl DispatchExecutor { input: BoxedExecutor, dispatchers: Vec, actor_id: u32, + fragment_id: u32, context: Arc, metrics: Arc, ) -> Self { @@ -262,6 +295,7 @@ impl DispatchExecutor { dispatchers, actor_id, actor_id_str: actor_id.to_string(), + fragment_id_str: fragment_id.to_string(), context, metrics, }, @@ -411,6 +445,12 @@ macro_rules! impl_dispatcher { } } + pub fn dispatcher_id_str(&self) -> &str { + match self { + $(Self::$variant_name(inner) => inner.dispatcher_id_str(), )* + } + } + pub fn is_empty(&self) -> bool { match self { $(Self::$variant_name(inner) => inner.is_empty(), )* @@ -433,27 +473,15 @@ macro_rules! for_all_dispatcher_variants { for_all_dispatcher_variants! { impl_dispatcher } -macro_rules! 
define_dispatcher_associated_types { - () => { - type DataFuture<'a> = impl DispatchFuture<'a>; - type BarrierFuture<'a> = impl DispatchFuture<'a>; - type WatermarkFuture<'a> = impl DispatchFuture<'a>; - }; -} - pub trait DispatchFuture<'a> = Future> + Send; pub trait Dispatcher: Debug + 'static { - type DataFuture<'a>: DispatchFuture<'a>; - type BarrierFuture<'a>: DispatchFuture<'a>; - type WatermarkFuture<'a>: DispatchFuture<'a>; - /// Dispatch a data chunk to downstream actors. - fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_>; + fn dispatch_data(&mut self, chunk: StreamChunk) -> impl DispatchFuture<'_>; /// Dispatch a barrier to downstream actors, generally by broadcasting it. - fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_>; + fn dispatch_barrier(&mut self, barrier: Barrier) -> impl DispatchFuture<'_>; /// Dispatch a watermark to downstream actors, generally by broadcasting it. - fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_>; + fn dispatch_watermark(&mut self, watermark: Watermark) -> impl DispatchFuture<'_>; /// Add new outputs to the dispatcher. fn add_outputs(&mut self, outputs: impl IntoIterator); @@ -462,8 +490,14 @@ pub trait Dispatcher: Debug + 'static { /// The ID of the dispatcher. A [`DispatchExecutor`] may have multiple dispatchers with /// different IDs. + /// + /// Note that the dispatcher id is always equal to the downstream fragment id. + /// See also `proto/stream_plan.proto`. fn dispatcher_id(&self) -> DispatcherId; + /// Dispatcher id in string. See [`Dispatcher::dispatcher_id`]. + fn dispatcher_id_str(&self) -> &str; + /// Whether the dispatcher has no outputs. If so, it'll be cleaned up from the /// [`DispatchExecutor`]. 
fn is_empty(&self) -> bool; @@ -475,6 +509,7 @@ pub struct RoundRobinDataDispatcher { output_indices: Vec, cur: usize, dispatcher_id: DispatcherId, + dispatcher_id_str: String, } impl RoundRobinDataDispatcher { @@ -488,43 +523,36 @@ impl RoundRobinDataDispatcher { output_indices, cur: 0, dispatcher_id, + dispatcher_id_str: dispatcher_id.to_string(), } } } impl Dispatcher for RoundRobinDataDispatcher { - define_dispatcher_associated_types!(); + async fn dispatch_data(&mut self, chunk: StreamChunk) -> StreamResult<()> { + let chunk = chunk.project(&self.output_indices); + self.outputs[self.cur].send(Message::Chunk(chunk)).await?; + self.cur += 1; + self.cur %= self.outputs.len(); + Ok(()) + } - fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_> { - async move { - let chunk = chunk.project(&self.output_indices); - self.outputs[self.cur].send(Message::Chunk(chunk)).await?; - self.cur += 1; - self.cur %= self.outputs.len(); - Ok(()) + async fn dispatch_barrier(&mut self, barrier: Barrier) -> StreamResult<()> { + // always broadcast barrier + for output in &mut self.outputs { + output.send(Message::Barrier(barrier.clone())).await?; } + Ok(()) } - fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { - async move { - // always broadcast barrier + async fn dispatch_watermark(&mut self, watermark: Watermark) -> StreamResult<()> { + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark for output in &mut self.outputs { - output.send(Message::Barrier(barrier.clone())).await?; + output.send(Message::Watermark(watermark.clone())).await?; } - Ok(()) - } - } - - fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { - async move { - if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { - // always broadcast watermark - for output in &mut self.outputs { - output.send(Message::Watermark(watermark.clone())).await?; - } - 
} - Ok(()) } + Ok(()) } fn add_outputs(&mut self, outputs: impl IntoIterator) { @@ -542,6 +570,10 @@ impl Dispatcher for RoundRobinDataDispatcher { self.dispatcher_id } + fn dispatcher_id_str(&self) -> &str { + &self.dispatcher_id_str + } + fn is_empty(&self) -> bool { self.outputs.is_empty() } @@ -555,6 +587,7 @@ pub struct HashDataDispatcher { /// different downstream actors. hash_mapping: ExpandedActorMapping, dispatcher_id: DispatcherId, + dispatcher_id_str: String, } impl Debug for HashDataDispatcher { @@ -581,117 +614,108 @@ impl HashDataDispatcher { output_indices, hash_mapping, dispatcher_id, + dispatcher_id_str: dispatcher_id.to_string(), } } } impl Dispatcher for HashDataDispatcher { - define_dispatcher_associated_types!(); - fn add_outputs(&mut self, outputs: impl IntoIterator) { self.outputs.extend(outputs); } - fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { - async move { - // always broadcast barrier - for output in &mut self.outputs { - output.send(Message::Barrier(barrier.clone())).await?; - } - Ok(()) + async fn dispatch_barrier(&mut self, barrier: Barrier) -> StreamResult<()> { + // always broadcast barrier + for output in &mut self.outputs { + output.send(Message::Barrier(barrier.clone())).await?; } + Ok(()) } - fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { - async move { - if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { - // always broadcast watermark - for output in &mut self.outputs { - output.send(Message::Watermark(watermark.clone())).await?; - } + async fn dispatch_watermark(&mut self, watermark: Watermark) -> StreamResult<()> { + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark + for output in &mut self.outputs { + output.send(Message::Watermark(watermark.clone())).await?; } - Ok(()) } + Ok(()) } - fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_> { - 
async move { - // A chunk can be shuffled into multiple output chunks that to be sent to downstreams. - // In these output chunks, the only difference are visibility map, which is calculated - // by the hash value of each line in the input chunk. - let num_outputs = self.outputs.len(); + async fn dispatch_data(&mut self, chunk: StreamChunk) -> StreamResult<()> { + // A chunk can be shuffled into multiple output chunks that to be sent to downstreams. + // In these output chunks, the only difference are visibility map, which is calculated + // by the hash value of each line in the input chunk. + let num_outputs = self.outputs.len(); - // get hash value of every line by its key - let vnodes = VirtualNode::compute_chunk(chunk.data_chunk(), &self.keys); + // get hash value of every line by its key + let vnodes = VirtualNode::compute_chunk(chunk.data_chunk(), &self.keys); - tracing::trace!(target: "events::stream::dispatch::hash", "\n{}\n keys {:?} => {:?}", chunk.to_pretty(), self.keys, vnodes); + tracing::debug!(target: "events::stream::dispatch::hash", "\n{}\n keys {:?} => {:?}", chunk.to_pretty(), self.keys, vnodes); - let mut vis_maps = repeat_with(|| BitmapBuilder::with_capacity(chunk.capacity())) - .take(num_outputs) - .collect_vec(); - let mut last_vnode_when_update_delete = None; - let mut new_ops: Vec = Vec::with_capacity(chunk.capacity()); + let mut vis_maps = repeat_with(|| BitmapBuilder::with_capacity(chunk.capacity())) + .take(num_outputs) + .collect_vec(); + let mut last_vnode_when_update_delete = None; + let mut new_ops: Vec = Vec::with_capacity(chunk.capacity()); - // Apply output indices after calculating the vnode. - let chunk = chunk.project(&self.output_indices); + // Apply output indices after calculating the vnode. + let chunk = chunk.project(&self.output_indices); - for ((vnode, &op), visible) in vnodes - .iter() - .copied() - .zip_eq_fast(chunk.ops()) - .zip_eq_fast(chunk.vis().iter()) - { - // Build visibility map for every output chunk. 
- for (output, vis_map) in self.outputs.iter().zip_eq_fast(vis_maps.iter_mut()) { - vis_map.append( - visible && self.hash_mapping[vnode.to_index()] == output.actor_id(), - ); - } + for ((vnode, &op), visible) in vnodes + .iter() + .copied() + .zip_eq_fast(chunk.ops()) + .zip_eq_fast(chunk.visibility().iter()) + { + // Build visibility map for every output chunk. + for (output, vis_map) in self.outputs.iter().zip_eq_fast(vis_maps.iter_mut()) { + vis_map.append(visible && self.hash_mapping[vnode.to_index()] == output.actor_id()); + } - if !visible { - new_ops.push(op); - continue; - } + if !visible { + new_ops.push(op); + continue; + } - // The 'update' message, noted by an `UpdateDelete` and a successive `UpdateInsert`, - // need to be rewritten to common `Delete` and `Insert` if they were dispatched to - // different actors. - if op == Op::UpdateDelete { - last_vnode_when_update_delete = Some(vnode); - } else if op == Op::UpdateInsert { - if vnode != last_vnode_when_update_delete.unwrap() { - new_ops.push(Op::Delete); - new_ops.push(Op::Insert); - } else { - new_ops.push(Op::UpdateDelete); - new_ops.push(Op::UpdateInsert); - } + // The 'update' message, noted by an `UpdateDelete` and a successive `UpdateInsert`, + // need to be rewritten to common `Delete` and `Insert` if they were dispatched to + // different actors. 
+ if op == Op::UpdateDelete { + last_vnode_when_update_delete = Some(vnode); + } else if op == Op::UpdateInsert { + if vnode != last_vnode_when_update_delete.unwrap() { + new_ops.push(Op::Delete); + new_ops.push(Op::Insert); } else { - new_ops.push(op); + new_ops.push(Op::UpdateDelete); + new_ops.push(Op::UpdateInsert); } + } else { + new_ops.push(op); } + } - let ops = new_ops; - - // individually output StreamChunk integrated with vis_map - for (vis_map, output) in vis_maps.into_iter().zip_eq_fast(self.outputs.iter_mut()) { - let vis_map = vis_map.finish(); - // columns is not changed in this function - let new_stream_chunk = - StreamChunk::new(ops.clone(), chunk.columns().into(), Some(vis_map)); - if new_stream_chunk.cardinality() > 0 { - event!( - tracing::Level::TRACE, - msg = "chunk", - downstream = output.actor_id(), - "send = \n{:#?}", - new_stream_chunk - ); - output.send(Message::Chunk(new_stream_chunk)).await?; - } + let ops = new_ops; + + // individually output StreamChunk integrated with vis_map + for (vis_map, output) in vis_maps.into_iter().zip_eq_fast(self.outputs.iter_mut()) { + let vis_map = vis_map.finish(); + // columns is not changed in this function + let new_stream_chunk = + StreamChunk::with_visibility(ops.clone(), chunk.columns().into(), vis_map); + if new_stream_chunk.cardinality() > 0 { + event!( + tracing::Level::TRACE, + msg = "chunk", + downstream = output.actor_id(), + "send = \n{:#?}", + new_stream_chunk + ); + output.send(Message::Chunk(new_stream_chunk)).await?; } - Ok(()) } + Ok(()) } fn remove_outputs(&mut self, actor_ids: &HashSet) { @@ -704,6 +728,10 @@ impl Dispatcher for HashDataDispatcher { self.dispatcher_id } + fn dispatcher_id_str(&self) -> &str { + &self.dispatcher_id_str + } + fn is_empty(&self) -> bool { self.outputs.is_empty() } @@ -715,6 +743,7 @@ pub struct BroadcastDispatcher { outputs: HashMap, output_indices: Vec, dispatcher_id: DispatcherId, + dispatcher_id_str: String, } impl BroadcastDispatcher { @@ -727,6 
+756,7 @@ impl BroadcastDispatcher { outputs: Self::into_pairs(outputs).collect(), output_indices, dispatcher_id, + dispatcher_id_str: dispatcher_id.to_string(), } } @@ -740,37 +770,29 @@ impl BroadcastDispatcher { } impl Dispatcher for BroadcastDispatcher { - define_dispatcher_associated_types!(); - - fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_> { - async move { - let chunk = chunk.project(&self.output_indices); - for output in self.outputs.values_mut() { - output.send(Message::Chunk(chunk.clone())).await?; - } - Ok(()) + async fn dispatch_data(&mut self, chunk: StreamChunk) -> StreamResult<()> { + let chunk = chunk.project(&self.output_indices); + for output in self.outputs.values_mut() { + output.send(Message::Chunk(chunk.clone())).await?; } + Ok(()) } - fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { - async move { - for output in self.outputs.values_mut() { - output.send(Message::Barrier(barrier.clone())).await?; - } - Ok(()) + async fn dispatch_barrier(&mut self, barrier: Barrier) -> StreamResult<()> { + for output in self.outputs.values_mut() { + output.send(Message::Barrier(barrier.clone())).await?; } + Ok(()) } - fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { - async move { - if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { - // always broadcast watermark - for output in self.outputs.values_mut() { - output.send(Message::Watermark(watermark.clone())).await?; - } + async fn dispatch_watermark(&mut self, watermark: Watermark) -> StreamResult<()> { + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark + for output in self.outputs.values_mut() { + output.send(Message::Watermark(watermark.clone())).await?; } - Ok(()) } + Ok(()) } fn add_outputs(&mut self, outputs: impl IntoIterator) { @@ -787,6 +809,10 @@ impl Dispatcher for BroadcastDispatcher { self.dispatcher_id } + fn 
dispatcher_id_str(&self) -> &str { + &self.dispatcher_id_str + } + fn is_empty(&self) -> bool { self.outputs.is_empty() } @@ -811,6 +837,7 @@ pub struct SimpleDispatcher { output: SmallVec<[BoxedOutput; 2]>, output_indices: Vec, dispatcher_id: DispatcherId, + dispatcher_id_str: String, } impl SimpleDispatcher { @@ -823,54 +850,47 @@ impl SimpleDispatcher { output: smallvec![output], output_indices, dispatcher_id, + dispatcher_id_str: dispatcher_id.to_string(), } } } impl Dispatcher for SimpleDispatcher { - define_dispatcher_associated_types!(); - fn add_outputs(&mut self, outputs: impl IntoIterator) { self.output.extend(outputs); assert!(self.output.len() <= 2); } - fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { - async move { - // Only barrier is allowed to be dispatched to multiple outputs during migration. - for output in &mut self.output { - output.send(Message::Barrier(barrier.clone())).await?; - } - Ok(()) + async fn dispatch_barrier(&mut self, barrier: Barrier) -> StreamResult<()> { + // Only barrier is allowed to be dispatched to multiple outputs during migration. 
+ for output in &mut self.output { + output.send(Message::Barrier(barrier.clone())).await?; } + Ok(()) } - fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_> { - async move { - let output = self - .output - .iter_mut() - .exactly_one() - .expect("expect exactly one output"); + async fn dispatch_data(&mut self, chunk: StreamChunk) -> StreamResult<()> { + let output = self + .output + .iter_mut() + .exactly_one() + .expect("expect exactly one output"); - let chunk = chunk.project(&self.output_indices); - output.send(Message::Chunk(chunk)).await - } + let chunk = chunk.project(&self.output_indices); + output.send(Message::Chunk(chunk)).await } - fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { - async move { - let output = self - .output - .iter_mut() - .exactly_one() - .expect("expect exactly one output"); + async fn dispatch_watermark(&mut self, watermark: Watermark) -> StreamResult<()> { + let output = self + .output + .iter_mut() + .exactly_one() + .expect("expect exactly one output"); - if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { - output.send(Message::Watermark(watermark)).await?; - } - Ok(()) + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + output.send(Message::Watermark(watermark)).await?; } + Ok(()) } fn remove_outputs(&mut self, actor_ids: &HashSet) { @@ -882,6 +902,10 @@ impl Dispatcher for SimpleDispatcher { self.dispatcher_id } + fn dispatcher_id_str(&self) -> &str { + &self.dispatcher_id_str + } + fn is_empty(&self) -> bool { self.output.is_empty() } @@ -1016,6 +1040,7 @@ mod tests { let _schema = Schema { fields: vec![] }; let (tx, rx) = channel_for_test(); let actor_id = 233; + let fragment_id = 666; let input = Box::new(ReceiverExecutor::for_test(rx)); let ctx = Arc::new(SharedContext::for_test()); let metrics = Arc::new(StreamingMetrics::unused()); @@ -1063,6 +1088,7 @@ mod tests { input, vec![broadcast_dispatcher, 
simple_dispatcher], actor_id, + fragment_id, ctx.clone(), metrics, )) @@ -1247,7 +1273,7 @@ mod tests { }) .collect(); - let chunk = StreamChunk::new(ops, columns, None); + let chunk = StreamChunk::new(ops, columns); hash_dispatcher.dispatch_data(chunk).await.unwrap(); for (output_idx, output) in output_data_vecs.into_iter().enumerate() { @@ -1269,12 +1295,8 @@ mod tests { .for_each(|(real_col, expect_col)| { let real_vals = real_chunk .visibility() - .as_ref() - .unwrap() - .iter() - .enumerate() - .filter(|(_, vis)| *vis) - .map(|(row_idx, _)| real_col.as_int32().value_at(row_idx).unwrap()) + .iter_ones() + .map(|row_idx| real_col.as_int32().value_at(row_idx).unwrap()) .collect::>(); assert_eq!(real_vals.len(), expect_col.len()); assert_eq!(real_vals, *expect_col); diff --git a/src/stream/src/executor/dynamic_filter.rs b/src/stream/src/executor/dynamic_filter.rs index 234ef6db29ab2..ccb55b75c24fc 100644 --- a/src/stream/src/executor/dynamic_filter.rs +++ b/src/stream/src/executor/dynamic_filter.rs @@ -22,10 +22,12 @@ use risingwave_common::bail; use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::catalog::Schema; use risingwave_common::hash::VnodeBitmapExt; -use risingwave_common::row::{once, OwnedRow as RowData, Row}; +use risingwave_common::row::{self, once, OwnedRow, OwnedRow as RowData, Row}; use risingwave_common::types::{DataType, Datum, DefaultOrd, ScalarImpl, ToDatumRef, ToOwnedDatum}; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_expr::expr::{build_func, BoxedExpression, InputRefExpression, LiteralExpression}; +use risingwave_expr::expr::{ + build_func_non_strict, InputRefExpression, LiteralExpression, NonStrictExpression, +}; use risingwave_pb::expr::expr_node::Type as ExprNodeType; use risingwave_pb::expr::expr_node::Type::{ GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, @@ -42,6 +44,7 @@ use super::{ use crate::common::table::state_table::{StateTable, WatermarkCacheParameterizedStateTable}; 
use crate::common::StreamChunkBuilder; use crate::executor::expect_first_barrier_from_aligned_stream; +use crate::task::ActorEvalErrorReport; pub struct DynamicFilterExecutor { ctx: ActorContextRef, @@ -94,17 +97,14 @@ impl DynamicFilterExecutor, + condition: Option, ) -> Result<(Vec, Bitmap), StreamExecutorError> { let mut new_ops = Vec::with_capacity(chunk.capacity()); let mut new_visibility = BitmapBuilder::with_capacity(chunk.capacity()); let mut last_res = false; let eval_results = if let Some(cond) = condition { - Some( - cond.eval_infallible(chunk, |err| self.ctx.on_compute_error(err, &self.identity)) - .await, - ) + Some(cond.eval_infallible(chunk).await) } else { None }; @@ -232,7 +232,11 @@ impl DynamicFilterExecutor Result, StreamExecutorError> { // Recover value for RHS if available - let rhs_stream = self.right_table.iter_row(Default::default()).await?; + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); + let rhs_stream = self + .right_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await?; pin_mut!(rhs_stream); if let Some(res) = rhs_stream.next().await { @@ -258,17 +262,24 @@ impl DynamicFilterExecutor DynamicFilterExecutor 0 { - let new_chunk = StreamChunk::new(new_ops, columns, Some(new_visibility)); + let new_chunk = + StreamChunk::with_visibility(new_ops, columns, new_visibility); yield Message::Chunk(new_chunk) } } @@ -381,9 +393,9 @@ impl DynamicFilterExecutor = std::result::Result; @@ -54,9 +53,6 @@ enum ErrorKind { StorageError, ), - #[error("Log store error: {0}")] - LogStoreError(#[source] LogStoreError), - #[error("Chunk operation error: {0}")] ArrayError(#[source] ArrayError), @@ -154,13 +150,6 @@ impl From for StreamExecutorError { } } -/// Log store error -impl From for StreamExecutorError { - fn from(e: LogStoreError) -> Self { - ErrorKind::LogStoreError(e).into() - } -} - /// Chunk operation error. 
impl From for StreamExecutorError { fn from(e: ArrayError) -> Self { diff --git a/src/stream/src/executor/exchange/input.rs b/src/stream/src/executor/exchange/input.rs index 576542ecfcecd..3804904b7c4f2 100644 --- a/src/stream/src/executor/exchange/input.rs +++ b/src/stream/src/executor/exchange/input.rs @@ -14,7 +14,6 @@ use std::pin::Pin; use std::task::{Context, Poll}; -use std::time::Instant; use anyhow::Context as _; use futures::{pin_mut, Stream}; @@ -149,12 +148,9 @@ impl RemoteInput { .await?; let up_actor_id = up_down_ids.0.to_string(); - let down_actor_id = up_down_ids.1.to_string(); let up_fragment_id = up_down_frag.0.to_string(); let down_fragment_id = up_down_frag.1.to_string(); - let mut rr = 0; - const SAMPLING_FREQUENCY: u64 = 100; let span: await_tree::Span = format!("RemoteInput (actor {up_actor_id})").into(); let mut batched_permits_accumulated = 0; @@ -171,20 +167,7 @@ impl RemoteInput { .with_label_values(&[&up_fragment_id, &down_fragment_id]) .inc_by(bytes as u64); - // add deserialization duration metric with given sampling frequency - let msg_res = if rr % SAMPLING_FREQUENCY == 0 { - let start_time = Instant::now(); - let msg_res = Message::from_protobuf(&msg); - metrics - .actor_sampled_deserialize_duration_ns - .with_label_values(&[&down_actor_id]) - .inc_by(start_time.elapsed().as_nanos() as u64); - msg_res - } else { - Message::from_protobuf(&msg) - }; - rr += 1; - + let msg_res = Message::from_protobuf(&msg); if let Some(add_back_permits) = match permits.unwrap().value { // For records, batch the permits we received to reduce the backward // `AddPermits` messages. 
diff --git a/src/stream/src/executor/expand.rs b/src/stream/src/executor/expand.rs index a1263d34a46e4..1aed4e15c0463 100644 --- a/src/stream/src/executor/expand.rs +++ b/src/stream/src/executor/expand.rs @@ -67,7 +67,7 @@ impl ExpandExecutor { let (mut columns, vis) = input.data_chunk().keep_columns(subsets).into_parts(); columns.extend(input.columns().iter().cloned()); columns.push(flags); - let chunk = StreamChunk::new(input.ops(), columns, vis.into_visibility()); + let chunk = StreamChunk::with_visibility(input.ops(), columns, vis); yield Message::Chunk(chunk); } } diff --git a/src/stream/src/executor/filter.rs b/src/stream/src/executor/filter.rs index 2a2fca4fe827d..1a1e645e44e6d 100644 --- a/src/stream/src/executor/filter.rs +++ b/src/stream/src/executor/filter.rs @@ -15,11 +15,11 @@ use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use risingwave_common::array::{Array, ArrayImpl, Op, StreamChunk, Vis}; +use risingwave_common::array::{Array, ArrayImpl, Op, StreamChunk}; use risingwave_common::buffer::BitmapBuilder; use risingwave_common::catalog::Schema; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use super::*; @@ -28,25 +28,25 @@ use super::*; /// `FilterExecutor` will insert, delete or update element into next executor according /// to the result of the expression. pub struct FilterExecutor { - ctx: ActorContextRef, + _ctx: ActorContextRef, info: ExecutorInfo, input: BoxedExecutor, /// Expression of the current filter, note that the filter must always have the same output for /// the same input. 
- expr: BoxedExpression, + expr: NonStrictExpression, } impl FilterExecutor { pub fn new( ctx: ActorContextRef, input: Box, - expr: BoxedExpression, + expr: NonStrictExpression, executor_id: u64, ) -> Self { let input_info = input.info(); Self { - ctx, + _ctx: ctx, input, info: ExecutorInfo { schema: input_info.schema, @@ -72,10 +72,7 @@ impl FilterExecutor { let mut new_visibility = BitmapBuilder::with_capacity(n); let mut last_res = false; - assert!(match vis { - Vis::Compact(c) => c == n, - Vis::Bitmap(ref m) => m.len() == n, - }); + assert_eq!(vis.len(), n); let ArrayImpl::Bool(bool_array) = &*filter else { panic!("unmatched type: filter expr returns a non-null array"); @@ -127,7 +124,7 @@ impl FilterExecutor { let new_visibility = new_visibility.finish(); Ok(if new_visibility.count_ones() > 0 { - let new_chunk = StreamChunk::new(new_ops, columns, Some(new_visibility)); + let new_chunk = StreamChunk::with_visibility(new_ops, columns, new_visibility); Some(new_chunk) } else { None @@ -173,12 +170,7 @@ impl FilterExecutor { Message::Chunk(chunk) => { let chunk = chunk.compact(); - let pred_output = self - .expr - .eval_infallible(chunk.data_chunk(), |err| { - self.ctx.on_compute_error(err, &self.info.identity) - }) - .await; + let pred_output = self.expr.eval_infallible(chunk.data_chunk()).await; match Self::filter(chunk, pred_output)? 
{ Some(new_chunk) => yield Message::Chunk(new_chunk), @@ -198,8 +190,8 @@ mod tests { use risingwave_common::array::StreamChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; - use risingwave_expr::expr::build_from_pretty; + use super::super::test_utils::expr::build_from_pretty; use super::super::test_utils::MockSource; use super::super::*; use super::*; diff --git a/src/stream/src/executor/flow_control.rs b/src/stream/src/executor/flow_control.rs index 1790f212566b7..45e04717e2a9d 100644 --- a/src/stream/src/executor/flow_control.rs +++ b/src/stream/src/executor/flow_control.rs @@ -16,7 +16,7 @@ use std::fmt::{Debug, Formatter}; use std::num::NonZeroU32; use governor::clock::MonotonicClock; -use governor::{Quota, RateLimiter}; +use governor::{InsufficientCapacity, Quota, RateLimiter}; use risingwave_common::catalog::Schema; use super::*; @@ -58,10 +58,12 @@ impl FlowControlExecutor { let result = rate_limiter .until_n_ready(NonZeroU32::new(chunk.cardinality() as u32).unwrap()) .await; - assert!( - result.is_ok(), - "the capacity of rate_limiter must be larger than the cardinality of chunk" - ); + if let Err(InsufficientCapacity(n)) = result { + tracing::error!( + "Rate Limit {} smaller than chunk cardinality {n}", + self.rate_limit, + ); + } } yield Message::Chunk(chunk); } diff --git a/src/stream/src/executor/hash_agg.rs b/src/stream/src/executor/hash_agg.rs index 610c2857728b3..cb62e8d8f94aa 100644 --- a/src/stream/src/executor/hash_agg.rs +++ b/src/stream/src/executor/hash_agg.rs @@ -12,22 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; use futures::{stream, StreamExt}; use futures_async_stream::try_stream; -use iter_chunks::IterChunks; use itertools::Itertools; -use risingwave_common::array::{Op, StreamChunk}; +use risingwave_common::array::StreamChunk; use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::catalog::Schema; +use risingwave_common::estimate_size::collections::hashmap::EstimatedHashMap; +use risingwave_common::estimate_size::EstimateSize; use risingwave_common::hash::{HashKey, PrecomputedBuildHasher}; use risingwave_common::types::ScalarImpl; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{build, AggCall, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{build_retractable, AggCall, BoxedAggregateFunction}; use risingwave_storage::StateStore; use super::agg_common::{AggExecutorArgs, HashAggExecutorExtraArgs}; @@ -52,7 +53,15 @@ use crate::executor::{BoxedMessageStream, Executor, Message}; use crate::task::AtomicU64Ref; type AggGroup = GenericAggGroup; -type AggGroupCache = ManagedLruCache, PrecomputedBuildHasher>; +type BoxedAggGroup = Box>; + +impl EstimateSize for BoxedAggGroup { + fn estimated_heap_size(&self) -> usize { + self.as_ref().estimated_size() + } +} + +type AggGroupCache = ManagedLruCache>, PrecomputedBuildHasher>; /// [`HashAggExecutor`] could process large amounts of data using a state backend. It works as /// follows: @@ -101,11 +110,11 @@ struct ExecutorInner { /// `None` means the agg call need not to maintain a state table by itself. storages: Vec>, - /// State table for the previous result of all agg calls. - /// The outputs of all managed agg states are collected and stored in this + /// Intermediate state table for value-state agg calls. 
+ /// The state of all value-state aggregates are collected and stored in this /// table when `flush_data` is called. /// Also serves as EOWC sort buffer table. - result_table: StateTable, + intermediate_state_table: StateTable, /// State tables for deduplicating rows on distinct key for distinct agg calls. /// One table per distinct column (may be shared by multiple agg calls). @@ -120,6 +129,9 @@ struct ExecutorInner { /// The maximum size of the chunk produced by executor at a time. chunk_size: usize, + /// The maximum heap size of dirty groups. If exceeds, the executor should flush dirty groups. + max_dirty_groups_heap_size: usize, + /// Should emit on window close according to watermark? emit_on_window_close: bool, @@ -130,7 +142,7 @@ impl ExecutorInner { fn all_state_tables_mut(&mut self) -> impl Iterator> { iter_table_storage(&mut self.storages) .chain(self.distinct_dedup_tables.values_mut()) - .chain(std::iter::once(&mut self.result_table)) + .chain(std::iter::once(&mut self.intermediate_state_table)) } } @@ -140,8 +152,8 @@ struct ExecutionVars { /// Cache for [`AggGroup`]s. `HashKey` -> `AggGroup`. agg_group_cache: AggGroupCache, - /// Changed group keys in the current epoch (before next flush). - group_change_set: HashSet, + /// Changed [`AggGroup`]s in the current epoch (before next flush). + dirty_groups: EstimatedHashMap>, /// Distinct deduplicater to deduplicate input rows for each distinct agg call. 
distinct_dedup: DistinctDeduplicater, @@ -209,7 +221,8 @@ impl HashAggExecutor { let group_key_len = args.extra.group_key_indices.len(); // NOTE: we assume the prefix of table pk is exactly the group key - let group_key_table_pk_projection = &args.result_table.pk_indices()[..group_key_len]; + let group_key_table_pk_projection = + &args.intermediate_state_table.pk_indices()[..group_key_len]; assert!(group_key_table_pk_projection .iter() .sorted() @@ -230,15 +243,16 @@ impl HashAggExecutor { input_schema: input_info.schema, group_key_indices: args.extra.group_key_indices, group_key_table_pk_projection: group_key_table_pk_projection.to_vec().into(), - agg_funcs: args.agg_calls.iter().map(build).try_collect()?, + agg_funcs: args.agg_calls.iter().map(build_retractable).try_collect()?, agg_calls: args.agg_calls, row_count_index: args.row_count_index, storages: args.storages, - result_table: args.result_table, + intermediate_state_table: args.intermediate_state_table, distinct_dedup_tables: args.distinct_dedup_tables, watermark_epoch: args.watermark_epoch, extreme_cache_size: args.extreme_cache_size, chunk_size: args.extra.chunk_size, + max_dirty_groups_heap_size: args.extra.max_dirty_groups_heap_size, emit_on_window_close: args.extra.emit_on_window_close, metrics: args.metrics, }, @@ -250,15 +264,15 @@ impl HashAggExecutor { /// in one chunk. /// /// * `keys`: Hash Keys of rows. - /// * `base_visibility`: Visibility of rows, `None` means all are visible. - fn get_group_visibilities(keys: Vec, base_visibility: Option<&Bitmap>) -> Vec<(K, Bitmap)> { + /// * `base_visibility`: Visibility of rows. 
+ fn get_group_visibilities(keys: Vec, base_visibility: &Bitmap) -> Vec<(K, Bitmap)> { let n_rows = keys.len(); let mut vis_builders = HashMap::new(); - for (row_idx, key) in keys.into_iter().enumerate().filter(|(row_idx, _)| { - base_visibility - .map(|vis| vis.is_set(*row_idx)) - .unwrap_or(true) - }) { + for (row_idx, key) in keys + .into_iter() + .enumerate() + .filter(|(row_idx, _)| base_visibility.is_set(*row_idx)) + { vis_builders .entry(key) .or_insert_with(|| BitmapBuilder::zeroed(n_rows)) @@ -270,53 +284,70 @@ impl HashAggExecutor { .collect() } - async fn ensure_keys_in_cache( + /// Touch the [`AggGroup`]s for the given keys, which means move them from cache to the `dirty_groups` map. + /// If the [`AggGroup`] doesn't exist in the cache before, it will be created or recovered from state table. + async fn touch_agg_groups( this: &ExecutorInner, - cache: &mut AggGroupCache, + vars: &mut ExecutionVars, keys: impl IntoIterator, - stats: &mut ExecutionStats, ) -> StreamExecutorResult<()> { let group_key_types = &this.info.schema.data_types()[..this.group_key_indices.len()]; let futs = keys .into_iter() .filter_map(|key| { - stats.total_lookup_count += 1; - if cache.contains(key) { - None - } else { - stats.lookup_miss_count += 1; - Some(async { - // Create `AggGroup` for the current group if not exists. This will - // fetch previous agg result from the result table. 
- let agg_group = AggGroup::create( - Some(GroupKey::new( - key.deserialize(group_key_types)?, - Some(this.group_key_table_pk_projection.clone()), - )), - &this.agg_calls, - &this.agg_funcs, - &this.storages, - &this.result_table, - &this.input_pk_indices, - this.row_count_index, - this.extreme_cache_size, - &this.input_schema, - ) - .await?; - Ok::<_, StreamExecutorError>((key.clone(), agg_group)) - }) + vars.stats.total_lookup_count += 1; + if vars.dirty_groups.contains_key(key) { + // already dirty + return None; + } + match vars.agg_group_cache.get_mut(key) { + Some(mut agg_group) => { + let agg_group: &mut Option<_> = &mut agg_group; + assert!( + agg_group.is_some(), + "invalid state: AggGroup is None in cache but not dirty" + ); + // move from cache to `dirty_groups` + vars.dirty_groups + .insert(key.clone(), agg_group.take().unwrap()); + None // no need to create + } + None => { + vars.stats.lookup_miss_count += 1; + Some(async { + // Create `AggGroup` for the current group if not exists. This will + // restore agg states from the intermediate state table. + let agg_group = AggGroup::create( + Some(GroupKey::new( + key.deserialize(group_key_types)?, + Some(this.group_key_table_pk_projection.clone()), + )), + &this.agg_calls, + &this.agg_funcs, + &this.storages, + &this.intermediate_state_table, + &this.input_pk_indices, + this.row_count_index, + this.extreme_cache_size, + &this.input_schema, + ) + .await?; + Ok::<_, StreamExecutorError>((key.clone(), Box::new(agg_group))) + }) + } } }) .collect_vec(); // collect is necessary to avoid lifetime issue of `agg_group_cache` - stats.chunk_total_lookup_count += 1; + vars.stats.chunk_total_lookup_count += 1; if !futs.is_empty() { // If not all the required states/keys are in the cache, it's a chunk-level cache miss. 
- stats.chunk_lookup_miss_count += 1; + vars.stats.chunk_lookup_miss_count += 1; let mut buffered = stream::iter(futs).buffer_unordered(10).fuse(); while let Some(result) = buffered.next().await { let (key, agg_group) = result?; - cache.put(key, agg_group); + let none = vars.dirty_groups.insert(key, agg_group); + debug_assert!(none.is_none()); } } Ok(()) @@ -331,20 +362,13 @@ impl HashAggExecutor { let keys = K::build(&this.group_key_indices, chunk.data_chunk())?; let group_visibilities = Self::get_group_visibilities(keys, chunk.visibility()); - // Create `AggGroup` for each group if not exists. - Self::ensure_keys_in_cache( - this, - &mut vars.agg_group_cache, - group_visibilities.iter().map(|(k, _)| k), - &mut vars.stats, - ) - .await?; + // Ensure all `AggGroup`s are in `dirty_groups`. + Self::touch_agg_groups(this, vars, group_visibilities.iter().map(|(k, _)| k)).await?; // Calculate the row visibility for every agg call. let mut call_visibilities = Vec::with_capacity(this.agg_calls.len()); for agg_call in &this.agg_calls { - let agg_call_filter_res = - agg_call_filter_res(&this.actor_ctx, &this.info.identity, agg_call, &chunk).await?; + let agg_call_filter_res = agg_call_filter_res(agg_call, &chunk).await?; call_visibilities.push(agg_call_filter_res); } @@ -362,7 +386,8 @@ impl HashAggExecutor { // Apply chunk to each of the state (per agg_call), for each group. for (key, visibility) in group_visibilities { - let mut agg_group = vars.agg_group_cache.get_mut(&key).unwrap(); + let agg_group: &mut BoxedAggGroup<_> = &mut vars.dirty_groups.get_mut(&key).unwrap(); + let visibilities = call_visibilities .iter() .map(|call_vis| call_vis & &visibility) @@ -390,136 +415,111 @@ impl HashAggExecutor { agg_group .apply_chunk(&chunk, &this.agg_calls, &this.agg_funcs, visibilities) .await?; - // Mark the group as changed. 
- vars.group_change_set.insert(key); } - Ok(()) - } - - #[try_stream(ok = StreamChunk, error = StreamExecutorError)] - async fn flush_data<'a>( - this: &'a mut ExecutorInner, - vars: &'a mut ExecutionVars, - epoch: EpochPair, - ) { - // Update metrics. + // Update the metrics. let actor_id_str = this.actor_ctx.id.to_string(); - let table_id_str = this.result_table.table_id().to_string(); - this.metrics - .agg_lookup_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) - .inc_by(vars.stats.lookup_miss_count); - vars.stats.lookup_miss_count = 0; - this.metrics - .agg_total_lookup_count - .with_label_values(&[&table_id_str, &actor_id_str]) - .inc_by(vars.stats.total_lookup_count); - vars.stats.total_lookup_count = 0; + let fragment_id_str = this.actor_ctx.fragment_id.to_string(); + let table_id_str = this.intermediate_state_table.table_id().to_string(); this.metrics - .agg_cached_keys - .with_label_values(&[&table_id_str, &actor_id_str]) - .set(vars.agg_group_cache.len() as i64); - this.metrics - .agg_chunk_lookup_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) - .inc_by(vars.stats.chunk_lookup_miss_count); - vars.stats.chunk_lookup_miss_count = 0; + .agg_dirty_groups_count + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .set(vars.dirty_groups.len() as i64); this.metrics - .agg_chunk_total_lookup_count - .with_label_values(&[&table_id_str, &actor_id_str]) - .inc_by(vars.stats.chunk_total_lookup_count); - vars.stats.chunk_total_lookup_count = 0; + .agg_dirty_groups_heap_size + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .set(vars.dirty_groups.estimated_heap_size() as i64); - let window_watermark = vars.window_watermark.take(); - let n_dirty_group = vars.group_change_set.len(); - - let futs_of_all_groups = vars - .group_change_set - .drain() - .map(|key| { - // Get agg group of the key. 
- vars.agg_group_cache - .get_mut_unsafe(&key) - .expect("changed group must have corresponding AggGroup") - }) - .map(|mut agg_group| { - let storages = &this.storages; - let funcs = &this.agg_funcs; - // SAFETY: - // 1. `key`s in `keys_in_batch` are unique by nature, because they're - // from `group_change_set` which is a set. - // - // 2. `MutGuard` should not be sent to other tasks. - let mut agg_group = unsafe { agg_group.as_mut_guard() }; - async move { - // Build aggregate result change. - agg_group.build_change(storages, funcs).await - } - }); + Ok(()) + } - // TODO(rc): figure out a more reasonable concurrency limit. - const MAX_CONCURRENT_TASKS: usize = 100; - let mut futs_batches = IterChunks::chunks(futs_of_all_groups, MAX_CONCURRENT_TASKS); - while let Some(futs) = futs_batches.next() { - // Compute agg result changes for each group, and emit changes accordingly. - let changes = futures::future::try_join_all(futs).await?; + #[try_stream(ok = StreamChunk, error = StreamExecutorError)] + async fn flush_data<'a>(this: &'a mut ExecutorInner, vars: &'a mut ExecutionVars) { + let window_watermark = vars.window_watermark.take(); - // Emit from changes + // flush changed states into intermediate state table + for agg_group in vars.dirty_groups.values() { + let encoded_states = agg_group.encode_states(&this.agg_funcs)?; if this.emit_on_window_close { - for change in changes.into_iter().flatten() { - // For EOWC, write change to the sort buffer. - vars.buffer.apply_change(change, &mut this.result_table); - } + vars.buffer + .update_without_old_value(encoded_states, &mut this.intermediate_state_table); } else { - for change in changes.into_iter().flatten() { - // For EOU, write change to result table and directly yield the change. 
- this.result_table.write_record(change.as_ref()); - if let Some(chunk) = vars.chunk_builder.append_record(change) { - yield chunk; - } - } + this.intermediate_state_table + .update_without_old_value(encoded_states); } } - // Emit remaining results from result table. if this.emit_on_window_close { + // remove all groups under watermark and emit their results if let Some(watermark) = window_watermark.as_ref() { #[for_await] for row in vars .buffer - .consume(watermark.clone(), &mut this.result_table) + .consume(watermark.clone(), &mut this.intermediate_state_table) { let row = row?; - if let Some(chunk) = vars.chunk_builder.append_row(Op::Insert, row) { + let group_key = row + .clone() + .into_iter() + .take(this.group_key_indices.len()) + .collect(); + let states = row.into_iter().skip(this.group_key_indices.len()).collect(); + + let mut agg_group = AggGroup::create_eowc( + Some(GroupKey::new( + group_key, + Some(this.group_key_table_pk_projection.clone()), + )), + &this.agg_calls, + &this.agg_funcs, + &this.storages, + &states, + &this.input_pk_indices, + this.row_count_index, + this.extreme_cache_size, + &this.input_schema, + )?; + + let change = agg_group + .build_change(&this.storages, &this.agg_funcs) + .await?; + if let Some(change) = change { + if let Some(chunk) = vars.chunk_builder.append_record(change) { + yield chunk; + } + } + } + } + } else { + // emit on update + // TODO(wrj,rc): we may need to parallelize it and set a reasonable concurrency limit. + for mut agg_group in vars.dirty_groups.values_mut() { + let agg_group = agg_group.as_mut(); + let change = agg_group + .build_change(&this.storages, &this.agg_funcs) + .await?; + if let Some(change) = change { + if let Some(chunk) = vars.chunk_builder.append_record(change) { yield chunk; } } } } + // move dirty groups back to cache + for (key, agg_group) in vars.dirty_groups.drain() { + vars.agg_group_cache.put(key, Some(agg_group)); + } + // Yield the remaining rows in chunk builder. 
if let Some(chunk) = vars.chunk_builder.take() { yield chunk; } - if n_dirty_group == 0 && window_watermark.is_none() { - // Nothing is expected to be changed. - this.all_state_tables_mut().for_each(|table| { - table.commit_no_data_expected(epoch); - }); - } else { - if let Some(watermark) = window_watermark { - // Update watermark of state tables, for state cleaning. - this.all_state_tables_mut() - .for_each(|table| table.update_watermark(watermark.clone(), false)); - } - // Commit all state tables. - futures::future::try_join_all( - this.all_state_tables_mut() - .map(|table| async { table.commit(epoch).await }), - ) - .await?; + if let Some(watermark) = window_watermark { + // Update watermark of state tables, for state cleaning. + this.all_state_tables_mut() + .for_each(|table| table.update_watermark(watermark.clone(), false)); } // Flush distinct dedup state. @@ -530,6 +530,44 @@ impl HashAggExecutor { vars.agg_group_cache.evict(); } + fn update_metrics(this: &ExecutorInner, vars: &mut ExecutionVars) { + let actor_id_str = this.actor_ctx.id.to_string(); + let fragment_id_str = this.actor_ctx.fragment_id.to_string(); + let table_id_str = this.intermediate_state_table.table_id().to_string(); + this.metrics + .agg_lookup_miss_count + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .inc_by(std::mem::take(&mut vars.stats.lookup_miss_count)); + this.metrics + .agg_total_lookup_count + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .inc_by(std::mem::take(&mut vars.stats.total_lookup_count)); + this.metrics + .agg_cached_entry_count + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .set(vars.agg_group_cache.len() as i64); + this.metrics + .agg_chunk_lookup_miss_count + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) + .inc_by(std::mem::take(&mut vars.stats.chunk_lookup_miss_count)); + this.metrics + .agg_chunk_total_lookup_count + .with_label_values(&[&table_id_str, 
&actor_id_str, &fragment_id_str]) + .inc_by(std::mem::take(&mut vars.stats.chunk_total_lookup_count)); + } + + async fn commit_state_tables( + this: &mut ExecutorInner, + epoch: EpochPair, + ) -> StreamExecutorResult<()> { + futures::future::try_join_all( + this.all_state_tables_mut() + .map(|table| async { table.commit(epoch).await }), + ) + .await?; + Ok(()) + } + #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(self) { let HashAggExecutor { @@ -537,14 +575,14 @@ impl HashAggExecutor { inner: mut this, } = self; - let window_col_idx_in_group_key = this.result_table.pk_indices()[0]; + let window_col_idx_in_group_key = this.intermediate_state_table.pk_indices()[0]; let window_col_idx = this.group_key_indices[window_col_idx_in_group_key]; let agg_group_cache_metrics_info = MetricsInfo::new( this.metrics.clone(), - this.result_table.table_id(), + this.intermediate_state_table.table_id(), this.actor_ctx.id, - "agg result table", + "agg intermediate state table", ); let mut vars = ExecutionVars { @@ -554,7 +592,7 @@ impl HashAggExecutor { agg_group_cache_metrics_info, PrecomputedBuildHasher, ), - group_change_set: HashSet::new(), + dirty_groups: Default::default(), distinct_dedup: DistinctDeduplicater::new( &this.agg_calls, &this.watermark_epoch, @@ -565,7 +603,7 @@ impl HashAggExecutor { buffered_watermarks: vec![None; this.group_key_indices.len()], window_watermark: None, chunk_builder: StreamChunkBuilder::new(this.chunk_size, this.info.schema.data_types()), - buffer: SortBuffer::new(window_col_idx_in_group_key, &this.result_table), + buffer: SortBuffer::new(window_col_idx_in_group_key, &this.intermediate_state_table), }; // TODO(rc): use something like a `ColumnMapping` type @@ -593,7 +631,7 @@ impl HashAggExecutor { #[for_await] for msg in input { let msg = msg?; - vars.agg_group_cache.evict_except_cur_epoch(); + vars.agg_group_cache.evict(); match msg { Message::Watermark(watermark) => { let group_key_seq = 
group_key_invert_idx[watermark.col_idx]; @@ -607,12 +645,23 @@ impl HashAggExecutor { } Message::Chunk(chunk) => { Self::apply_chunk(&mut this, &mut vars, chunk).await?; + + if vars.dirty_groups.estimated_heap_size() >= this.max_dirty_groups_heap_size { + // flush dirty groups if heap size is too large, to better prevent from OOM + #[for_await] + for chunk in Self::flush_data(&mut this, &mut vars) { + yield Message::Chunk(chunk?); + } + } } Message::Barrier(barrier) => { + Self::update_metrics(&this, &mut vars); + #[for_await] - for chunk in Self::flush_data(&mut this, &mut vars, barrier.epoch) { + for chunk in Self::flush_data(&mut this, &mut vars) { yield Message::Chunk(chunk?); } + Self::commit_state_tables(&mut this, barrier.epoch).await?; if this.emit_on_window_close { // ignore watermarks on other columns @@ -631,7 +680,7 @@ impl HashAggExecutor { // Update the vnode bitmap for state tables of all agg calls if asked. if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(this.actor_ctx.id) { - let previous_vnode_bitmap = this.result_table.vnodes().clone(); + let previous_vnode_bitmap = this.intermediate_state_table.vnodes().clone(); this.all_state_tables_mut().for_each(|table| { let _ = table.update_vnode_bitmap(vnode_bitmap.clone()); }); diff --git a/src/stream/src/executor/hash_join.rs b/src/stream/src/executor/hash_join.rs index 31705e792909a..75414fe24a379 100644 --- a/src/stream/src/executor/hash_join.rs +++ b/src/stream/src/executor/hash_join.rs @@ -28,7 +28,7 @@ use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::{DataType, DefaultOrd, ToOwnedDatum}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use risingwave_expr::ExprError; use risingwave_storage::StateStore; use tokio::time::Instant; @@ -202,11 +202,11 @@ impl std::fmt::Debug for JoinSide { impl JoinSide { // WARNING: Please do 
not call this until we implement it. - #[expect(dead_code)] fn is_dirty(&self) -> bool { unimplemented!() } + #[expect(dead_code)] fn clear_cache(&mut self) { assert!( !self.is_dirty(), @@ -242,9 +242,9 @@ pub struct HashJoinExecutor, /// Optional non-equi join conditions - cond: Option, + cond: Option, /// Column indices of watermark output and offset expression of each inequality, respectively. - inequality_pairs: Vec<(Vec, Option)>, + inequality_pairs: Vec<(Vec, Option)>, /// The output watermark of each inequality condition and its value is the minimum of the /// calculation result of both side. It will be used to generate watermark into downstream /// and do state cleaning if `clean_state` field of that inequality is `true`. @@ -310,11 +310,10 @@ struct HashJoinChunkBuilder { ctx: &'a ActorContextRef, - identity: &'a str, side_l: &'a mut JoinSide, side_r: &'a mut JoinSide, actual_output_data_types: &'a [DataType], - cond: &'a mut Option, + cond: &'a mut Option, inequality_watermarks: &'a [Option], chunk: StreamChunk, append_only_optimize: bool, @@ -449,8 +448,8 @@ impl HashJoinExecutor, executor_id: u64, - cond: Option, - inequality_pairs: Vec<(usize, usize, bool, Option)>, + cond: Option, + inequality_pairs: Vec<(usize, usize, bool, Option)>, op_info: String, state_table_l: StateTable, degree_state_table_l: StateTable, @@ -640,6 +639,7 @@ impl HashJoinExecutor HashJoinExecutor HashJoinExecutor HashJoinExecutor { @@ -745,7 +747,6 @@ impl HashJoinExecutor HashJoinExecutor { @@ -772,7 +773,6 @@ impl HashJoinExecutor HashJoinExecutor { @@ -814,23 +814,15 @@ impl HashJoinExecutor250ms). - // Those will result in that barrier is always ready - // in source. Since select barrier is preferred, - // chunk would never be selected. 
- // self.metrics - // .join_cached_rows - // .with_label_values(&[&actor_id_str, side]) - // .set(ht.cached_rows() as i64); self.metrics - .join_cached_entries - .with_label_values(&[&actor_id_str, side]) + .join_cached_entry_count + .with_label_values(&[&actor_id_str, &fragment_id_str, side]) .set(ht.entry_count() as i64); } self.metrics .join_match_duration_ns - .with_label_values(&[&actor_id_str, "barrier"]) + .with_label_values(&[&actor_id_str, &fragment_id_str, "barrier"]) .inc_by(barrier_start_time.elapsed().as_nanos() as u64); yield Message::Barrier(barrier); } @@ -920,7 +912,7 @@ impl HashJoinExecutor input_watermark.val = value.unwrap(), @@ -990,7 +982,6 @@ impl HashJoinExecutor(args: EqJoinArgs<'_, K, S>) { let EqJoinArgs { ctx, - identity, side_l, side_r, actual_output_data_types, @@ -1000,10 +991,9 @@ impl HashJoinExecutor HashJoinExecutor = if side_update @@ -1079,12 +1072,10 @@ impl HashJoinExecutor HashJoinExecutor) -> BoxedExpression { + fn create_cond(condition_text: Option) -> NonStrictExpression { build_from_pretty( condition_text .as_deref() @@ -1350,7 +1339,7 @@ mod tests { with_condition: bool, null_safe: bool, condition_text: Option, - inequality_pairs: Vec<(usize, usize, bool, Option)>, + inequality_pairs: Vec<(usize, usize, bool, Option)>, ) -> (MessageSender, MessageSender, BoxedMessageStream) { let schema = Schema { fields: vec![ diff --git a/src/stream/src/executor/hop_window.rs b/src/stream/src/executor/hop_window.rs index aa1840aa832ce..42d13d790da88 100644 --- a/src/stream/src/executor/hop_window.rs +++ b/src/stream/src/executor/hop_window.rs @@ -17,9 +17,9 @@ use std::num::NonZeroUsize; use futures::StreamExt; use futures_async_stream::try_stream; use itertools::Itertools; -use risingwave_common::array::{DataChunk, Op, Vis}; +use risingwave_common::array::{DataChunk, Op}; use risingwave_common::types::Interval; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use 
risingwave_expr::ExprError; use super::error::StreamExecutorError; @@ -27,14 +27,14 @@ use super::{ActorContextRef, BoxedExecutor, Executor, ExecutorInfo, Message}; use crate::common::StreamChunkBuilder; pub struct HopWindowExecutor { - ctx: ActorContextRef, + _ctx: ActorContextRef, pub input: BoxedExecutor, pub info: ExecutorInfo, pub time_col_idx: usize, pub window_slide: Interval, pub window_size: Interval, - window_start_exprs: Vec, - window_end_exprs: Vec, + window_start_exprs: Vec, + window_end_exprs: Vec, pub output_indices: Vec, chunk_size: usize, } @@ -48,13 +48,13 @@ impl HopWindowExecutor { time_col_idx: usize, window_slide: Interval, window_size: Interval, - window_start_exprs: Vec, - window_end_exprs: Vec, + window_start_exprs: Vec, + window_end_exprs: Vec, output_indices: Vec, chunk_size: usize, ) -> Self { HopWindowExecutor { - ctx, + _ctx: ctx, input, info, time_col_idx, @@ -90,13 +90,11 @@ impl HopWindowExecutor { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(self: Box) { let Self { - ctx, input, window_slide, window_size, output_indices, - info, time_col_idx, chunk_size, @@ -142,7 +140,7 @@ impl HopWindowExecutor { let chunk = chunk.compact(); let (data_chunk, ops) = chunk.into_parts(); // SAFETY: Already compacted. - assert!(matches!(data_chunk.vis(), Vis::Compact(_))); + assert!(data_chunk.is_compacted()); let len = data_chunk.cardinality(); // Collect each window's data into a chunk. 
@@ -152,22 +150,14 @@ impl HopWindowExecutor { let window_start_col = if out_window_start_col_idx.is_some() { Some( self.window_start_exprs[i] - .eval_infallible(&data_chunk, |err| { - ctx.on_compute_error(err, &info.identity) - }) + .eval_infallible(&data_chunk) .await, ) } else { None }; let window_end_col = if out_window_end_col_idx.is_some() { - Some( - self.window_end_exprs[i] - .eval_infallible(&data_chunk, |err| { - ctx.on_compute_error(err, &info.identity) - }) - .await, - ) + Some(self.window_end_exprs[i].eval_infallible(&data_chunk).await) } else { None }; @@ -228,9 +218,7 @@ impl HopWindowExecutor { { let w = w .clone() - .transform_with_expr(start_expr, out_start_idx, |err| { - ctx.on_compute_error(err, &info.identity) - }) + .transform_with_expr(start_expr, out_start_idx) .await; if let Some(w) = w { yield Message::Watermark(w); @@ -239,11 +227,7 @@ impl HopWindowExecutor { if let (Some(out_end_idx), Some(end_expr)) = (out_window_end_col_idx, self.window_end_exprs.get(0)) { - let w = w - .transform_with_expr(end_expr, out_end_idx, |err| { - ctx.on_compute_error(err, &info.identity) - }) - .await; + let w = w.transform_with_expr(end_expr, out_end_idx).await; if let Some(w) = w { yield Message::Watermark(w); } @@ -267,6 +251,7 @@ mod tests { use risingwave_common::types::test_utils::IntervalTestExt; use risingwave_common::types::{DataType, Interval}; use risingwave_expr::expr::test_utils::make_hop_window_expression; + use risingwave_expr::expr::NonStrictExpression; use crate::executor::test_utils::MockSource; use crate::executor::{ActorContext, Executor, ExecutorInfo, StreamChunk}; @@ -290,7 +275,7 @@ mod tests { U+ 6 2 ^10:42:00 - 7 1 ^10:51:00 + 8 3 ^11:02:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ); let input = MockSource::with_chunks(schema.clone(), pk_indices.clone(), vec![chunk]).boxed(); @@ -318,8 +303,14 @@ mod tests { 2, window_slide, window_size, - window_start_exprs, - window_end_exprs, + window_start_exprs + 
.into_iter() + .map(NonStrictExpression::for_test) + .collect(), + window_end_exprs + .into_iter() + .map(NonStrictExpression::for_test) + .collect(), output_indices, CHUNK_SIZE, ) @@ -354,7 +345,7 @@ mod tests { - 7 1 ^10:51:00 ^10:45:00 ^11:15:00 + 8 3 ^11:02:00 ^10:45:00 ^11:15:00 + 8 3 ^11:02:00 ^11:00:00 ^11:30:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); } @@ -387,7 +378,7 @@ mod tests { - ^11:15:00 1 7 ^10:51:00 + ^11:15:00 3 8 ^11:02:00 + ^11:30:00 3 8 ^11:02:00" - .replace('^', "2022-2-2T"), + .replace('^', "2022-02-02T"), ) ); } diff --git a/src/stream/src/executor/integration_tests.rs b/src/stream/src/executor/integration_tests.rs index 34dc4c295fe1e..cd505093294f1 100644 --- a/src/stream/src/executor/integration_tests.rs +++ b/src/stream/src/executor/integration_tests.rs @@ -14,14 +14,13 @@ use std::sync::{Arc, Mutex}; -use anyhow::Context; use futures::StreamExt; use futures_async_stream::try_stream; use multimap::MultiMap; use risingwave_common::array::*; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::*; -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_expr::expr::*; use risingwave_storage::memory::MemoryStateStore; @@ -112,6 +111,7 @@ async fn test_merger_sum_aggr() { 0, ))], 0, + 0, ctx, metrics, ); @@ -152,7 +152,7 @@ async fn test_merger_sum_aggr() { vec![], vec![ // TODO: use the new streaming_if_null expression here, and add `None` tests - Box::new(InputRefExpression::new(DataType::Int64, 1)), + NonStrictExpression::for_test(InputRefExpression::new(DataType::Int64, 1)), ], 3, MultiMap::new(), @@ -187,7 +187,6 @@ async fn test_merger_sum_aggr() { let chunk = StreamChunk::new( vec![op; i], vec![I64Array::from_iter(vec![1; i]).into_ref()], - None, ); input.send(Message::Chunk(chunk)).await.unwrap(); } @@ -259,7 +258,7 @@ impl StreamConsumer for SenderConsumer { let msg = item?; let barrier = msg.as_barrier().cloned(); - 
channel.send(msg).await.context("failed to send message")?; + channel.send(msg).await.expect("failed to send message"); if let Some(barrier) = barrier { yield barrier; diff --git a/src/stream/src/executor/lookup/impl_.rs b/src/stream/src/executor/lookup/impl_.rs index 463cec2d5f6bf..e7f39c0247bf9 100644 --- a/src/stream/src/executor/lookup/impl_.rs +++ b/src/stream/src/executor/lookup/impl_.rs @@ -322,7 +322,7 @@ impl LookupExecutor { .lookup_one_row(&row, self.last_barrier.as_ref().unwrap().epoch) .await? { - tracing::trace!(target: "events::stream::lookup::put", "{:?} {:?}", row, matched_row); + tracing::debug!(target: "events::stream::lookup::put", "{:?} {:?}", row, matched_row); if let Some(chunk) = builder.append_row(*op, row, &matched_row) { yield Message::Chunk(chunk); @@ -371,10 +371,11 @@ impl LookupExecutor { .into_owned_row(); let table_id_str = self.arrangement.storage_table.table_id().to_string(); let actor_id_str = self.ctx.id.to_string(); + let fragment_id_str = self.ctx.fragment_id.to_string(); self.ctx .streaming_metrics .lookup_total_query_cache_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); if let Some(result) = self.lookup_cache.lookup(&lookup_row) { return Ok(result.iter().cloned().collect_vec()); @@ -384,10 +385,10 @@ impl LookupExecutor { self.ctx .streaming_metrics .lookup_cache_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); - tracing::trace!(target: "events::stream::lookup::lookup_row", "{:?}", lookup_row); + tracing::debug!(target: "events::stream::lookup::lookup_row", "{:?}", lookup_row); let mut all_rows = VecWithKvSize::new(); // Drop the stream. 
@@ -426,14 +427,14 @@ impl LookupExecutor { } } - tracing::trace!(target: "events::stream::lookup::result", "{:?} => {:?}", lookup_row, all_rows.inner()); + tracing::debug!(target: "events::stream::lookup::result", "{:?} => {:?}", lookup_row, all_rows.inner()); self.lookup_cache.batch_update(lookup_row, all_rows.clone()); self.ctx .streaming_metrics .lookup_cached_entry_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .set(self.lookup_cache.len() as i64); Ok(all_rows.into_inner()) diff --git a/src/stream/src/executor/managed_state/join/mod.rs b/src/stream/src/executor/managed_state/join/mod.rs index 7ee23c06a5631..b7a81a0f75745 100644 --- a/src/stream/src/executor/managed_state/join/mod.rs +++ b/src/stream/src/executor/managed_state/join/mod.rs @@ -15,7 +15,7 @@ mod join_entry_state; use std::alloc::Global; -use std::ops::{Deref, DerefMut}; +use std::ops::{Bound, Deref, DerefMut}; use std::sync::Arc; use futures::future::try_join; @@ -40,7 +40,7 @@ use crate::common::metrics::MetricsInfo; use crate::common::table::state_table::StateTable; use crate::executor::error::StreamExecutorResult; use crate::executor::monitor::StreamingMetrics; -use crate::task::{ActorId, AtomicU64Ref}; +use crate::task::{ActorId, AtomicU64Ref, FragmentId}; type DegreeType = u64; @@ -161,6 +161,7 @@ pub struct JoinHashMapMetrics { metrics: Arc, /// Basic information actor_id: String, + fragment_id: String, join_table_id: String, degree_table_id: String, side: &'static str, @@ -175,6 +176,7 @@ impl JoinHashMapMetrics { pub fn new( metrics: Arc, actor_id: ActorId, + fragment_id: FragmentId, side: &'static str, join_table_id: u32, degree_table_id: u32, @@ -182,6 +184,7 @@ impl JoinHashMapMetrics { Self { metrics, actor_id: actor_id.to_string(), + fragment_id: fragment_id.to_string(), join_table_id: join_table_id.to_string(), degree_table_id: degree_table_id.to_string(), side, @@ -193,23 +196,25 @@ impl 
JoinHashMapMetrics { pub fn flush(&mut self) { self.metrics - .join_lookup_miss_count + .join_lookup_total_count .with_label_values(&[ (self.side), &self.join_table_id, &self.degree_table_id, &self.actor_id, + &self.fragment_id, ]) - .inc_by(self.lookup_miss_count as u64); + .inc_by(self.total_lookup_count as u64); self.metrics - .join_total_lookup_count + .join_lookup_miss_count .with_label_values(&[ (self.side), &self.join_table_id, &self.degree_table_id, &self.actor_id, + &self.fragment_id, ]) - .inc_by(self.total_lookup_count as u64); + .inc_by(self.lookup_miss_count as u64); self.metrics .join_insert_cache_miss_count .with_label_values(&[ @@ -217,6 +222,7 @@ impl JoinHashMapMetrics { &self.join_table_id, &self.degree_table_id, &self.actor_id, + &self.fragment_id, ]) .inc_by(self.insert_cache_miss_count as u64); self.total_lookup_count = 0; @@ -284,6 +290,7 @@ impl JoinHashMap { pk_contained_in_jk: bool, metrics: Arc, actor_id: ActorId, + fragment_id: FragmentId, side: &'static str, ) -> Self { let alloc = StatsAlloc::new(Global).shared(); @@ -335,6 +342,7 @@ impl JoinHashMap { metrics: JoinHashMapMetrics::new( metrics, actor_id, + fragment_id, side, join_table_id, degree_table_id, @@ -402,14 +410,18 @@ impl JoinHashMap { let mut entry_state = JoinEntryState::default(); if self.need_degree_table { - let table_iter_fut = self - .state - .table - .iter_row_with_pk_prefix(&key, PrefetchOptions::new_for_exhaust_iter()); - let degree_table_iter_fut = self - .degree_state - .table - .iter_row_with_pk_prefix(&key, PrefetchOptions::new_for_exhaust_iter()); + let sub_range: &(Bound, Bound) = + &(Bound::Unbounded, Bound::Unbounded); + let table_iter_fut = self.state.table.iter_with_prefix( + &key, + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ); + let degree_table_iter_fut = self.degree_state.table.iter_with_prefix( + &key, + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ); let (table_iter, degree_table_iter) = try_join(table_iter_fut, 
degree_table_iter_fut).await?; @@ -437,10 +449,12 @@ impl JoinHashMap { ); } } else { + let sub_range: &(Bound, Bound) = + &(Bound::Unbounded, Bound::Unbounded); let table_iter = self .state .table - .iter_row_with_pk_prefix(&key, PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix(&key, sub_range, PrefetchOptions::new_for_exhaust_iter()) .await?; #[for_await] diff --git a/src/stream/src/executor/merge.rs b/src/stream/src/executor/merge.rs index dad7bf4ea3abb..f2f7d84ca2a3d 100644 --- a/src/stream/src/executor/merge.rs +++ b/src/stream/src/executor/merge.rs @@ -114,6 +114,7 @@ impl MergeExecutor { let select_all = SelectReceivers::new(self.actor_context.id, self.upstreams); let actor_id = self.actor_context.id; let actor_id_str = actor_id.to_string(); + let fragment_id_str = self.fragment_id.to_string(); let mut upstream_fragment_id_str = self.upstream_fragment_id.to_string(); // Channels that're blocked by the barrier to align. @@ -122,7 +123,7 @@ impl MergeExecutor { while let Some(msg) = select_all.next().await { self.metrics .actor_input_buffer_blocking_duration_ns - .with_label_values(&[&actor_id_str, &upstream_fragment_id_str]) + .with_label_values(&[&actor_id_str, &fragment_id_str, &upstream_fragment_id_str]) .inc_by(start_time.elapsed().as_nanos() as u64); let mut msg: Message = msg?; @@ -133,11 +134,11 @@ impl MergeExecutor { Message::Chunk(chunk) => { self.metrics .actor_in_record_cnt - .with_label_values(&[&actor_id_str]) + .with_label_values(&[&actor_id_str, &fragment_id_str]) .inc_by(chunk.cardinality() as _); } Message::Barrier(barrier) => { - tracing::trace!( + tracing::debug!( target: "events::stream::barrier::path", actor_id = actor_id, "receiver receives barrier from path: {:?}", @@ -466,7 +467,7 @@ mod tests { fn build_test_chunk(epoch: u64) -> StreamChunk { // The number of items in `ops` is the epoch count. 
let ops = vec![Op::Insert; epoch as usize]; - StreamChunk::new(ops, vec![], None) + StreamChunk::new(ops, vec![]) } #[tokio::test] diff --git a/src/stream/src/executor/mod.rs b/src/stream/src/executor/mod.rs index 8fa7a5d818cc4..c28d6ec8564d9 100644 --- a/src/stream/src/executor/mod.rs +++ b/src/stream/src/executor/mod.rs @@ -26,14 +26,13 @@ use risingwave_common::array::StreamChunk; use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::Schema; use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, DefaultOrd, ScalarImpl}; +use risingwave_common::types::{DataType, Datum, DefaultOrd, ScalarImpl}; use risingwave_common::util::epoch::{Epoch, EpochPair}; use risingwave_common::util::tracing::TracingContext; -use risingwave_common::util::value_encoding::{deserialize_datum, serialize_datum}; +use risingwave_common::util::value_encoding::{DatumFromProtoExt, DatumToProtoExt}; use risingwave_connector::source::SplitImpl; -use risingwave_expr::expr::BoxedExpression; -use risingwave_expr::ExprError; -use risingwave_pb::data::{PbDatum, PbEpoch}; +use risingwave_expr::expr::{Expression, NonStrictExpression}; +use risingwave_pb::data::PbEpoch; use risingwave_pb::expr::PbInputRef; use risingwave_pb::stream_plan::barrier::{BarrierKind, PbMutation}; use risingwave_pb::stream_plan::stream_message::StreamMessage; @@ -333,13 +332,7 @@ impl Barrier { } } - /// Whether this barrier is for configuration change. Used for source executor initialization. - pub fn is_update(&self) -> bool { - matches!(self.mutation.as_deref(), Some(Mutation::Update { .. })) - } - - /// Whether this barrier is for resume. Used for now executor to determine whether to yield a - /// chunk and a watermark before this barrier. + /// Whether this barrier is for resume. 
pub fn is_resume(&self) -> bool { matches!(self.mutation.as_deref(), Some(Mutation::Resume)) } @@ -648,9 +641,8 @@ impl Watermark { pub async fn transform_with_expr( self, - expr: &BoxedExpression, + expr: &NonStrictExpression, new_col_idx: usize, - on_err: impl Fn(ExprError), ) -> Option { let Self { col_idx, val, .. } = self; let row = { @@ -658,8 +650,8 @@ impl Watermark { row[col_idx] = Some(val); OwnedRow::new(row) }; - let val = expr.eval_row_infallible(&row, on_err).await?; - Some(Self::new(new_col_idx, expr.return_type(), val)) + let val = expr.eval_row_infallible(&row).await?; + Some(Self::new(new_col_idx, expr.inner().return_type(), val)) } /// Transform the watermark with the given output indices. If this watermark is not in the @@ -677,16 +669,14 @@ impl Watermark { index: self.col_idx as _, r#type: Some(self.data_type.to_protobuf()), }), - val: Some(PbDatum { - body: serialize_datum(Some(&self.val)), - }), + val: Some(&self.val).to_protobuf().into(), } } pub fn from_protobuf(prost: &PbWatermark) -> StreamExecutorResult { let col_ref = prost.get_column()?; let data_type = DataType::from(col_ref.get_type()?); - let val = deserialize_datum(prost.get_val()?.get_body().as_slice(), &data_type)? + let val = Datum::from_protobuf(prost.get_val()?, &data_type)? 
.expect("watermark value cannot be null"); Ok(Self::new(col_ref.get_index() as _, data_type, val)) } diff --git a/src/stream/src/executor/monitor/streaming_stats.rs b/src/stream/src/executor/monitor/streaming_stats.rs index 2fc0c1b8b3b7d..a31727e76639f 100644 --- a/src/stream/src/executor/monitor/streaming_stats.rs +++ b/src/stream/src/executor/monitor/streaming_stats.rs @@ -17,23 +17,33 @@ use std::sync::OnceLock; use prometheus::core::{AtomicF64, AtomicI64, AtomicU64, GenericCounterVec, GenericGaugeVec}; use prometheus::{ exponential_buckets, histogram_opts, register_gauge_vec_with_registry, - register_histogram_vec_with_registry, register_histogram_with_registry, - register_int_counter_vec_with_registry, register_int_counter_with_registry, - register_int_gauge_vec_with_registry, register_int_gauge_with_registry, Histogram, - HistogramVec, IntCounter, IntGauge, Registry, + register_histogram_with_registry, register_int_counter_vec_with_registry, + register_int_counter_with_registry, register_int_gauge_vec_with_registry, + register_int_gauge_with_registry, Histogram, IntCounter, IntGauge, Registry, }; use risingwave_common::config::MetricLevel; -use risingwave_common::metrics::RelabeledHistogramVec; +use risingwave_common::metrics::{ + LabelGuardedHistogramVec, LabelGuardedIntCounterVec, LabelGuardedIntGaugeVec, + RelabeledGuardedHistogramVec, +}; use risingwave_common::monitor::GLOBAL_METRICS_REGISTRY; +use risingwave_common::{ + register_guarded_histogram_vec_with_registry, register_guarded_int_counter_vec_with_registry, + register_guarded_int_gauge_vec_with_registry, +}; +use risingwave_connector::sink::SinkMetrics; #[derive(Clone)] pub struct StreamingMetrics { pub level: MetricLevel, + // Executor metrics (disabled by default) pub executor_row_count: GenericCounterVec, + + // Streaming actor metrics from tokio (disabled by default) pub actor_execution_time: GenericGaugeVec, - pub actor_output_buffer_blocking_duration_ns: GenericCounterVec, - pub 
actor_input_buffer_blocking_duration_ns: GenericCounterVec, + pub actor_output_buffer_blocking_duration_ns: LabelGuardedIntCounterVec<3>, + pub actor_input_buffer_blocking_duration_ns: LabelGuardedIntCounterVec<3>, pub actor_scheduled_duration: GenericGaugeVec, pub actor_scheduled_cnt: GenericGaugeVec, pub actor_fast_poll_duration: GenericGaugeVec, @@ -44,38 +54,45 @@ pub struct StreamingMetrics { pub actor_poll_cnt: GenericGaugeVec, pub actor_idle_duration: GenericGaugeVec, pub actor_idle_cnt: GenericGaugeVec, + + // Streaming actor pub actor_memory_usage: GenericGaugeVec, - pub actor_in_record_cnt: GenericCounterVec, - pub actor_out_record_cnt: GenericCounterVec, - pub actor_sampled_deserialize_duration_ns: GenericCounterVec, + pub actor_in_record_cnt: LabelGuardedIntCounterVec<2>, + pub actor_out_record_cnt: LabelGuardedIntCounterVec<2>, + + // Source pub source_output_row_count: GenericCounterVec, pub source_row_per_barrier: GenericCounterVec, pub source_split_change_count: GenericCounterVec, + // Sink & materialized view + pub sink_input_row_count: GenericCounterVec, + pub mview_input_row_count: GenericCounterVec, + // Exchange (see also `compute::ExchangeServiceMetrics`) pub exchange_frag_recv_size: GenericCounterVec, // Streaming Join - pub join_lookup_miss_count: GenericCounterVec, - pub join_total_lookup_count: GenericCounterVec, - pub join_insert_cache_miss_count: GenericCounterVec, - pub join_actor_input_waiting_duration_ns: GenericCounterVec, - pub join_match_duration_ns: GenericCounterVec, - pub join_barrier_align_duration: RelabeledHistogramVec, - pub join_cached_entries: GenericGaugeVec, - pub join_cached_rows: GenericGaugeVec, - pub join_cached_estimated_size: GenericGaugeVec, - pub join_matched_join_keys: RelabeledHistogramVec, + pub join_lookup_miss_count: LabelGuardedIntCounterVec<5>, + pub join_lookup_total_count: LabelGuardedIntCounterVec<5>, + pub join_insert_cache_miss_count: LabelGuardedIntCounterVec<5>, + pub 
join_actor_input_waiting_duration_ns: LabelGuardedIntCounterVec<2>, + pub join_match_duration_ns: LabelGuardedIntCounterVec<3>, + pub join_barrier_align_duration: RelabeledGuardedHistogramVec<3>, + pub join_cached_entry_count: LabelGuardedIntGaugeVec<3>, + pub join_matched_join_keys: RelabeledGuardedHistogramVec<3>, // Streaming Aggregation pub agg_lookup_miss_count: GenericCounterVec, pub agg_total_lookup_count: GenericCounterVec, - pub agg_cached_keys: GenericGaugeVec, + pub agg_cached_entry_count: GenericGaugeVec, pub agg_chunk_lookup_miss_count: GenericCounterVec, pub agg_chunk_total_lookup_count: GenericCounterVec, pub agg_distinct_cache_miss_count: GenericCounterVec, pub agg_distinct_total_cache_count: GenericCounterVec, pub agg_distinct_cached_entry_count: GenericGaugeVec, + pub agg_dirty_groups_count: GenericGaugeVec, + pub agg_dirty_groups_heap_size: GenericGaugeVec, // Streaming TopN pub group_top_n_cache_miss_count: GenericCounterVec, @@ -85,7 +102,7 @@ pub struct StreamingMetrics { pub group_top_n_appendonly_total_query_cache_count: GenericCounterVec, pub group_top_n_appendonly_cached_entry_count: GenericGaugeVec, - // look up + // Lookup executor pub lookup_cache_miss_count: GenericCounterVec, pub lookup_total_query_cache_count: GenericCounterVec, pub lookup_cached_entry_count: GenericGaugeVec, @@ -117,7 +134,18 @@ pub struct StreamingMetrics { /// The progress made by the earliest in-flight barriers in the local barrier manager. 
pub barrier_manager_progress: IntCounter, - pub sink_commit_duration: HistogramVec, + // Sink related metrics + pub sink_commit_duration: LabelGuardedHistogramVec<3>, + pub connector_sink_rows_received: LabelGuardedIntCounterVec<2>, + pub log_store_first_write_epoch: LabelGuardedIntGaugeVec<3>, + pub log_store_latest_write_epoch: LabelGuardedIntGaugeVec<3>, + pub log_store_write_rows: LabelGuardedIntCounterVec<3>, + pub log_store_latest_read_epoch: LabelGuardedIntGaugeVec<3>, + pub log_store_read_rows: LabelGuardedIntCounterVec<3>, + pub kv_log_store_storage_write_count: LabelGuardedIntCounterVec<3>, + pub kv_log_store_storage_write_size: LabelGuardedIntCounterVec<3>, + pub kv_log_store_storage_read_count: LabelGuardedIntCounterVec<4>, + pub kv_log_store_storage_read_size: LabelGuardedIntCounterVec<4>, // Memory management // FIXME(yuhao): use u64 here @@ -125,9 +153,11 @@ pub struct StreamingMetrics { pub lru_physical_now_ms: IntGauge, pub lru_runtime_loop_count: IntCounter, pub lru_watermark_step: IntGauge, - pub lru_evicted_watermark_time_diff_ms: GenericGaugeVec, + pub lru_evicted_watermark_time_ms: GenericGaugeVec, pub jemalloc_allocated_bytes: IntGauge, pub jemalloc_active_bytes: IntGauge, + pub jvm_allocated_bytes: IntGauge, + pub jvm_active_bytes: IntGauge, /// User compute error reporting pub user_compute_error_count: GenericCounterVec, @@ -156,7 +186,7 @@ impl StreamingMetrics { let executor_row_count = register_int_counter_vec_with_registry!( "stream_executor_row_count", "Total number of rows that have been output from each executor", - &["actor_id", "executor_identity"], + &["actor_id", "fragment_id", "executor_identity"], registry ) .unwrap(); @@ -185,30 +215,48 @@ impl StreamingMetrics { ) .unwrap(); - let actor_execution_time = register_gauge_vec_with_registry!( - "stream_actor_actor_execution_time", - "Total execution time (s) of an actor", - &["actor_id"], + let sink_input_row_count = register_int_counter_vec_with_registry!( + 
"stream_sink_input_row_count", + "Total number of rows streamed into sink executors", + &["sink_id", "actor_id", "fragment_id"], registry ) .unwrap(); - let actor_output_buffer_blocking_duration_ns = register_int_counter_vec_with_registry!( - "stream_actor_output_buffer_blocking_duration_ns", - "Total blocking duration (ns) of output buffer", - &["actor_id"], + let mview_input_row_count = register_int_counter_vec_with_registry!( + "stream_mview_input_row_count", + "Total number of rows streamed into materialize executors", + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); - let actor_input_buffer_blocking_duration_ns = register_int_counter_vec_with_registry!( - "stream_actor_input_buffer_blocking_duration_ns", - "Total blocking duration (ns) of input buffer", - &["actor_id", "upstream_fragment_id"], + let actor_execution_time = register_gauge_vec_with_registry!( + "stream_actor_actor_execution_time", + "Total execution time (s) of an actor", + &["actor_id"], registry ) .unwrap(); + let actor_output_buffer_blocking_duration_ns = + register_guarded_int_counter_vec_with_registry!( + "stream_actor_output_buffer_blocking_duration_ns", + "Total blocking duration (ns) of output buffer", + &["actor_id", "fragment_id", "downstream_fragment_id"], + registry + ) + .unwrap(); + + let actor_input_buffer_blocking_duration_ns = + register_guarded_int_counter_vec_with_registry!( + "stream_actor_input_buffer_blocking_duration_ns", + "Total blocking duration (ns) of input buffer", + &["actor_id", "fragment_id", "upstream_fragment_id"], + registry + ) + .unwrap(); + let exchange_frag_recv_size = register_int_counter_vec_with_registry!( "stream_exchange_frag_recv_size", "Total size of messages that have been received from upstream Fragment", @@ -297,26 +345,18 @@ impl StreamingMetrics { ) .unwrap(); - let actor_in_record_cnt = register_int_counter_vec_with_registry!( + let actor_in_record_cnt = register_guarded_int_counter_vec_with_registry!( 
"stream_actor_in_record_cnt", "Total number of rows actor received", - &["actor_id"], + &["actor_id", "fragment_id"], registry ) .unwrap(); - let actor_out_record_cnt = register_int_counter_vec_with_registry!( + let actor_out_record_cnt = register_guarded_int_counter_vec_with_registry!( "stream_actor_out_record_cnt", "Total number of rows actor sent", - &["actor_id"], - registry - ) - .unwrap(); - - let actor_sampled_deserialize_duration_ns = register_int_counter_vec_with_registry!( - "actor_sampled_deserialize_duration_ns", - "Duration (ns) of sampled chunk deserialization", - &["actor_id"], + &["actor_id", "fragment_id"], registry ) .unwrap(); @@ -324,47 +364,65 @@ impl StreamingMetrics { let actor_memory_usage = register_int_gauge_vec_with_registry!( "actor_memory_usage", "Memory usage (bytes)", - &["actor_id"], + &["actor_id", "fragment_id"], registry, ) .unwrap(); - let join_lookup_miss_count = register_int_counter_vec_with_registry!( + let join_lookup_miss_count = register_guarded_int_counter_vec_with_registry!( "stream_join_lookup_miss_count", "Join executor lookup miss duration", - &["side", "join_table_id", "degree_table_id", "actor_id"], + &[ + "side", + "join_table_id", + "degree_table_id", + "actor_id", + "fragment_id" + ], registry ) .unwrap(); - let join_total_lookup_count = register_int_counter_vec_with_registry!( + let join_lookup_total_count = register_guarded_int_counter_vec_with_registry!( "stream_join_lookup_total_count", "Join executor lookup total operation", - &["side", "join_table_id", "degree_table_id", "actor_id"], + &[ + "side", + "join_table_id", + "degree_table_id", + "actor_id", + "fragment_id" + ], registry ) .unwrap(); - let join_insert_cache_miss_count = register_int_counter_vec_with_registry!( + let join_insert_cache_miss_count = register_guarded_int_counter_vec_with_registry!( "stream_join_insert_cache_miss_count", "Join executor cache miss when insert operation", - &["side", "join_table_id", "degree_table_id", "actor_id"], + &[ + 
"side", + "join_table_id", + "degree_table_id", + "actor_id", + "fragment_id" + ], registry ) .unwrap(); - let join_actor_input_waiting_duration_ns = register_int_counter_vec_with_registry!( + let join_actor_input_waiting_duration_ns = register_guarded_int_counter_vec_with_registry!( "stream_join_actor_input_waiting_duration_ns", "Total waiting duration (ns) of input buffer of join actor", - &["actor_id"], + &["actor_id", "fragment_id"], registry ) .unwrap(); - let join_match_duration_ns = register_int_counter_vec_with_registry!( + let join_match_duration_ns = register_guarded_int_counter_vec_with_registry!( "stream_join_match_duration_ns", "Matching duration for each side", - &["actor_id", "side"], + &["actor_id", "fragment_id", "side"], registry ) .unwrap(); @@ -374,40 +432,24 @@ impl StreamingMetrics { "Duration of join align barrier", exponential_buckets(0.0001, 2.0, 21).unwrap() // max 104s ); - let join_barrier_align_duration = register_histogram_vec_with_registry!( + let join_barrier_align_duration = register_guarded_histogram_vec_with_registry!( opts, &["actor_id", "fragment_id", "wait_side"], registry ) .unwrap(); - let join_barrier_align_duration = RelabeledHistogramVec::with_metric_level_relabel_n( + let join_barrier_align_duration = RelabeledGuardedHistogramVec::with_metric_level_relabel_n( MetricLevel::Debug, join_barrier_align_duration, level, 1, ); - let join_cached_entries = register_int_gauge_vec_with_registry!( - "stream_join_cached_entries", + let join_cached_entry_count = register_guarded_int_gauge_vec_with_registry!( + "stream_join_cached_entry_count", "Number of cached entries in streaming join operators", - &["actor_id", "side"], - registry - ) - .unwrap(); - - let join_cached_rows = register_int_gauge_vec_with_registry!( - "stream_join_cached_rows", - "Number of cached rows in streaming join operators", - &["actor_id", "side"], - registry - ) - .unwrap(); - - let join_cached_estimated_size = register_int_gauge_vec_with_registry!( - 
"stream_join_cached_estimated_size", - "Estimated size of all cached entries in streaming join operators", - &["actor_id", "side"], + &["actor_id", "fragment_id", "side"], registry ) .unwrap(); @@ -418,14 +460,14 @@ impl StreamingMetrics { exponential_buckets(16.0, 2.0, 28).unwrap() // max 2^31 ); - let join_matched_join_keys = register_histogram_vec_with_registry!( + let join_matched_join_keys = register_guarded_histogram_vec_with_registry!( join_matched_join_keys_opts, &["actor_id", "fragment_id", "table_id"], registry ) .unwrap(); - let join_matched_join_keys = RelabeledHistogramVec::with_metric_level_relabel_n( + let join_matched_join_keys = RelabeledGuardedHistogramVec::with_metric_level_relabel_n( MetricLevel::Debug, join_matched_join_keys, level, @@ -435,7 +477,7 @@ impl StreamingMetrics { let agg_lookup_miss_count = register_int_counter_vec_with_registry!( "stream_agg_lookup_miss_count", "Aggregation executor lookup miss duration", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -443,7 +485,7 @@ impl StreamingMetrics { let agg_total_lookup_count = register_int_counter_vec_with_registry!( "stream_agg_lookup_total_count", "Aggregation executor lookup total operation", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -451,7 +493,7 @@ impl StreamingMetrics { let agg_distinct_cache_miss_count = register_int_counter_vec_with_registry!( "stream_agg_distinct_cache_miss_count", "Aggregation executor dinsinct miss duration", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -459,7 +501,7 @@ impl StreamingMetrics { let agg_distinct_total_cache_count = register_int_counter_vec_with_registry!( "stream_agg_distinct_total_cache_count", "Aggregation executor distinct total operation", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -467,7 +509,23 @@ impl StreamingMetrics { let 
agg_distinct_cached_entry_count = register_int_gauge_vec_with_registry!( "stream_agg_distinct_cached_entry_count", "Total entry counts in distinct aggregation executor cache", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], + registry + ) + .unwrap(); + + let agg_dirty_groups_count = register_int_gauge_vec_with_registry!( + "stream_agg_dirty_groups_count", + "Total dirty group counts in aggregation executor", + &["table_id", "actor_id", "fragment_id"], + registry + ) + .unwrap(); + + let agg_dirty_groups_heap_size = register_int_gauge_vec_with_registry!( + "stream_agg_dirty_groups_heap_size", + "Total dirty group heap size in aggregation executor", + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -475,7 +533,7 @@ impl StreamingMetrics { let group_top_n_cache_miss_count = register_int_counter_vec_with_registry!( "stream_group_top_n_cache_miss_count", "Group top n executor cache miss count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -483,7 +541,7 @@ impl StreamingMetrics { let group_top_n_total_query_cache_count = register_int_counter_vec_with_registry!( "stream_group_top_n_total_query_cache_count", "Group top n executor query cache total count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -491,7 +549,7 @@ impl StreamingMetrics { let group_top_n_cached_entry_count = register_int_gauge_vec_with_registry!( "stream_group_top_n_cached_entry_count", "Total entry counts in group top n executor cache", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -499,7 +557,7 @@ impl StreamingMetrics { let group_top_n_appendonly_cache_miss_count = register_int_counter_vec_with_registry!( "stream_group_top_n_appendonly_cache_miss_count", "Group top n appendonly executor cache miss count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ 
-508,7 +566,7 @@ impl StreamingMetrics { register_int_counter_vec_with_registry!( "stream_group_top_n_appendonly_total_query_cache_count", "Group top n appendonly executor total cache count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -516,7 +574,7 @@ impl StreamingMetrics { let group_top_n_appendonly_cached_entry_count = register_int_gauge_vec_with_registry!( "stream_group_top_n_appendonly_cached_entry_count", "Total entry counts in group top n appendonly executor cache", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -524,7 +582,7 @@ impl StreamingMetrics { let lookup_cache_miss_count = register_int_counter_vec_with_registry!( "stream_lookup_cache_miss_count", "Lookup executor cache miss count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -532,7 +590,7 @@ impl StreamingMetrics { let lookup_total_query_cache_count = register_int_counter_vec_with_registry!( "stream_lookup_total_query_cache_count", "Lookup executor query cache total count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -540,7 +598,7 @@ impl StreamingMetrics { let lookup_cached_entry_count = register_int_gauge_vec_with_registry!( "stream_lookup_cached_entry_count", "Total entry counts in lookup executor cache", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -548,7 +606,7 @@ impl StreamingMetrics { let temporal_join_cache_miss_count = register_int_counter_vec_with_registry!( "stream_temporal_join_cache_miss_count", "Temporal join executor cache miss count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -556,7 +614,7 @@ impl StreamingMetrics { let temporal_join_total_query_cache_count = register_int_counter_vec_with_registry!( "stream_temporal_join_total_query_cache_count", "Temporal join 
executor query cache total count", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -564,15 +622,15 @@ impl StreamingMetrics { let temporal_join_cached_entry_count = register_int_gauge_vec_with_registry!( "stream_temporal_join_cached_entry_count", "Total entry count in temporal join executor cache", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); - let agg_cached_keys = register_int_gauge_vec_with_registry!( - "stream_agg_cached_keys", + let agg_cached_entry_count = register_int_gauge_vec_with_registry!( + "stream_agg_cached_entry_count", "Number of cached keys in streaming aggregation operators", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -580,7 +638,7 @@ impl StreamingMetrics { let agg_chunk_lookup_miss_count = register_int_counter_vec_with_registry!( "stream_agg_chunk_lookup_miss_count", "Aggregation executor chunk-level lookup miss duration", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -588,7 +646,7 @@ impl StreamingMetrics { let agg_chunk_total_lookup_count = register_int_counter_vec_with_registry!( "stream_agg_chunk_lookup_total_count", "Aggregation executor chunk-level lookup total operation", - &["table_id", "actor_id"], + &["table_id", "actor_id", "fragment_id"], registry ) .unwrap(); @@ -671,10 +729,90 @@ impl StreamingMetrics { ) .unwrap(); - let sink_commit_duration = register_histogram_vec_with_registry!( + let sink_commit_duration = register_guarded_histogram_vec_with_registry!( "sink_commit_duration", "Duration of commit op in sink", - &["executor_id", "connector"], + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let connector_sink_rows_received = register_guarded_int_counter_vec_with_registry!( + "connector_sink_rows_received", + "Number of rows received by sink", + &["connector_type", "sink_id"], + registry + ) + 
.unwrap(); + + let log_store_first_write_epoch = register_guarded_int_gauge_vec_with_registry!( + "log_store_first_write_epoch", + "The first write epoch of log store", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let log_store_latest_write_epoch = register_guarded_int_gauge_vec_with_registry!( + "log_store_latest_write_epoch", + "The latest write epoch of log store", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let log_store_write_rows = register_guarded_int_counter_vec_with_registry!( + "log_store_write_rows", + "The write rate of rows", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let log_store_latest_read_epoch = register_guarded_int_gauge_vec_with_registry!( + "log_store_latest_read_epoch", + "The latest read epoch of log store", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let log_store_read_rows = register_guarded_int_counter_vec_with_registry!( + "log_store_read_rows", + "The read rate of rows", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let kv_log_store_storage_write_count = register_guarded_int_counter_vec_with_registry!( + "kv_log_store_storage_write_count", + "Write row count throughput of kv log store", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let kv_log_store_storage_write_size = register_guarded_int_counter_vec_with_registry!( + "kv_log_store_storage_write_size", + "Write size throughput of kv log store", + &["executor_id", "connector", "sink_id"], + registry + ) + .unwrap(); + + let kv_log_store_storage_read_count = register_guarded_int_counter_vec_with_registry!( + "kv_log_store_storage_read_count", + "Write row count throughput of kv log store", + &["executor_id", "connector", "sink_id", "read_type"], + registry + ) + .unwrap(); + + let kv_log_store_storage_read_size = register_guarded_int_counter_vec_with_registry!( + "kv_log_store_storage_read_size", + 
"Write size throughput of kv log store", + &["executor_id", "connector", "sink_id", "read_type"], registry ) .unwrap(); @@ -707,9 +845,9 @@ impl StreamingMetrics { ) .unwrap(); - let lru_evicted_watermark_time_diff_ms = register_int_gauge_vec_with_registry!( - "lru_evicted_watermark_time_diff_ms", - "The diff between current watermark and latest evicted watermark time by actors", + let lru_evicted_watermark_time_ms = register_int_gauge_vec_with_registry!( + "lru_evicted_watermark_time_ms", + "The latest evicted watermark time by actors", &["table_id", "actor_id", "desc"], registry ) @@ -729,6 +867,20 @@ impl StreamingMetrics { ) .unwrap(); + let jvm_allocated_bytes = register_int_gauge_with_registry!( + "jvm_allocated_bytes", + "The allocated jvm memory", + registry + ) + .unwrap(); + + let jvm_active_bytes = register_int_gauge_with_registry!( + "jvm_active_bytes", + "The active jvm memory", + registry + ) + .unwrap(); + let user_compute_error_count = register_int_counter_vec_with_registry!( "user_compute_error_count", "Compute errors in the system, queryable by tags", @@ -794,29 +946,30 @@ impl StreamingMetrics { actor_memory_usage, actor_in_record_cnt, actor_out_record_cnt, - actor_sampled_deserialize_duration_ns, source_output_row_count, source_row_per_barrier, source_split_change_count, + sink_input_row_count, + mview_input_row_count, exchange_frag_recv_size, join_lookup_miss_count, - join_total_lookup_count, + join_lookup_total_count, join_insert_cache_miss_count, join_actor_input_waiting_duration_ns, join_match_duration_ns, join_barrier_align_duration, - join_cached_entries, - join_cached_rows, - join_cached_estimated_size, + join_cached_entry_count, join_matched_join_keys, agg_lookup_miss_count, agg_total_lookup_count, - agg_cached_keys, + agg_cached_entry_count, agg_chunk_lookup_miss_count, agg_chunk_total_lookup_count, agg_distinct_cache_miss_count, agg_distinct_total_cache_count, agg_distinct_cached_entry_count, + agg_dirty_groups_count, + 
agg_dirty_groups_heap_size, group_top_n_cache_miss_count, group_top_n_total_query_cache_count, group_top_n_cached_entry_count, @@ -840,13 +993,25 @@ impl StreamingMetrics { barrier_sync_latency, barrier_manager_progress, sink_commit_duration, + connector_sink_rows_received, + log_store_first_write_epoch, + log_store_latest_write_epoch, + log_store_write_rows, + log_store_latest_read_epoch, + log_store_read_rows, + kv_log_store_storage_write_count, + kv_log_store_storage_write_size, + kv_log_store_storage_read_count, + kv_log_store_storage_read_size, lru_current_watermark_time_ms, lru_physical_now_ms, lru_runtime_loop_count, lru_watermark_step, - lru_evicted_watermark_time_diff_ms, + lru_evicted_watermark_time_ms, jemalloc_allocated_bytes, jemalloc_active_bytes, + jvm_allocated_bytes, + jvm_active_bytes, user_compute_error_count, user_source_reader_error_count, materialize_cache_hit_count, @@ -859,4 +1024,42 @@ impl StreamingMetrics { pub fn unused() -> Self { global_streaming_metrics(MetricLevel::Disabled) } + + pub fn new_sink_metrics( + &self, + identity: &str, + sink_id_str: &str, + connector: &str, + ) -> SinkMetrics { + let label_list = [identity, connector, sink_id_str]; + let sink_commit_duration_metrics = self.sink_commit_duration.with_label_values(&label_list); + let connector_sink_rows_received = self + .connector_sink_rows_received + .with_label_values(&[connector, sink_id_str]); + + let log_store_latest_read_epoch = self + .log_store_latest_read_epoch + .with_label_values(&label_list); + + let log_store_latest_write_epoch = self + .log_store_latest_write_epoch + .with_label_values(&label_list); + + let log_store_first_write_epoch = self + .log_store_first_write_epoch + .with_label_values(&label_list); + + let log_store_write_rows = self.log_store_write_rows.with_label_values(&label_list); + let log_store_read_rows = self.log_store_read_rows.with_label_values(&label_list); + + SinkMetrics { + sink_commit_duration_metrics, + connector_sink_rows_received, 
+ log_store_first_write_epoch, + log_store_latest_write_epoch, + log_store_write_rows, + log_store_latest_read_epoch, + log_store_read_rows, + } + } } diff --git a/src/stream/src/executor/mview/materialize.rs b/src/stream/src/executor/mview/materialize.rs index 0c9a981d18108..cfb02ec34c481 100644 --- a/src/stream/src/executor/mview/materialize.rs +++ b/src/stream/src/executor/mview/materialize.rs @@ -22,7 +22,7 @@ use enum_as_inner::EnumAsInner; use futures::{stream, StreamExt}; use futures_async_stream::try_stream; use itertools::{izip, Itertools}; -use risingwave_common::array::{Op, StreamChunk, Vis}; +use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::{ColumnDesc, ColumnId, ConflictBehavior, Schema, TableId}; use risingwave_common::estimate_size::EstimateSize; @@ -52,16 +52,15 @@ use crate::task::AtomicU64Ref; /// `MaterializeExecutor` materializes changes in stream into a materialized view on storage. pub struct MaterializeExecutor { input: BoxedExecutor, + info: ExecutorInfo, state_table: StateTableInner, /// Columns of arrange keys (including pk, group keys, join keys, etc.) 
- arrange_columns: Vec, + arrange_key_indices: Vec, actor_context: ActorContextRef, - info: ExecutorInfo, - materialize_cache: MaterializeCache, conflict_behavior: ConflictBehavior, @@ -74,9 +73,9 @@ impl MaterializeExecutor { #[allow(clippy::too_many_arguments)] pub async fn new( input: BoxedExecutor, + info: ExecutorInfo, store: S, - key: Vec, - executor_id: u64, + arrange_key: Vec, actor_context: ActorContextRef, vnodes: Option>, table_catalog: &Table, @@ -84,9 +83,7 @@ impl MaterializeExecutor { conflict_behavior: ConflictBehavior, metrics: Arc, ) -> Self { - let arrange_columns: Vec = key.iter().map(|k| k.column_index).collect(); - - let schema = input.schema().clone(); + let arrange_key_indices: Vec = arrange_key.iter().map(|k| k.column_index).collect(); let state_table = if table_catalog.version.is_some() { // TODO: If we do some `Delete` after schema change, we cannot ensure the encoded value @@ -104,14 +101,10 @@ impl MaterializeExecutor { Self { input, + info, state_table, - arrange_columns: arrange_columns.clone(), + arrange_key_indices, actor_context, - info: ExecutorInfo { - schema, - pk_indices: arrange_columns, - identity: format!("MaterializeExecutor {:X}", executor_id), - }, materialize_cache: MaterializeCache::new(watermark_epoch, metrics_info), conflict_behavior, } @@ -119,6 +112,11 @@ impl MaterializeExecutor { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(mut self) { + // for metrics + let table_id_str = self.state_table.table_id().to_string(); + let actor_id_str = self.actor_context.id.to_string(); + let fragment_id_str = self.actor_context.fragment_id.to_string(); + let data_types = self.schema().data_types().clone(); let mut input = self.input.execute(); @@ -136,6 +134,12 @@ impl MaterializeExecutor { yield match msg { Message::Watermark(w) => Message::Watermark(w), Message::Chunk(chunk) => { + self.actor_context + .streaming_metrics + .mview_input_row_count + .with_label_values(&[&table_id_str, 
&actor_id_str, &fragment_id_str]) + .inc_by(chunk.cardinality() as u64); + match self.conflict_behavior { ConflictBehavior::Overwrite | ConflictBehavior::IgnoreConflict => { // create MaterializeBuffer from chunk @@ -231,7 +235,7 @@ impl MaterializeExecutor { Self { input, state_table, - arrange_columns: arrange_columns.clone(), + arrange_key_indices: arrange_columns.clone(), actor_context: ActorContext::create(0), info: ExecutorInfo { schema, @@ -281,7 +285,7 @@ fn generate_output( } if let Some(new_data_chunk) = data_chunk_builder.consume_all() { - let new_stream_chunk = StreamChunk::new(new_ops, new_data_chunk.columns().to_vec(), None); + let new_stream_chunk = StreamChunk::new(new_ops, new_data_chunk.columns().to_vec()); Ok(Some(new_stream_chunk)) } else { Ok(None) @@ -329,26 +333,12 @@ impl MaterializeBuffer { let (_, vis) = key_chunk.into_parts(); let mut buffer = MaterializeBuffer::new(); - match vis { - Vis::Bitmap(vis) => { - for ((op, key, value), vis) in - izip!(ops.iter(), pks, values).zip_eq_debug(vis.iter()) - { - if vis { - match op { - Op::Insert | Op::UpdateInsert => buffer.insert(key, value), - Op::Delete | Op::UpdateDelete => buffer.delete(key, value), - }; - } - } - } - Vis::Compact(_) => { - for (op, key, value) in izip!(ops.iter(), pks, values) { - match op { - Op::Insert | Op::UpdateInsert => buffer.insert(key, value), - Op::Delete | Op::UpdateDelete => buffer.delete(key, value), - }; - } + for ((op, key, value), vis) in izip!(ops.iter(), pks, values).zip_eq_debug(vis.iter()) { + if vis { + match op { + Op::Insert | Op::UpdateInsert => buffer.insert(key, value), + Op::Delete | Op::UpdateDelete => buffer.delete(key, value), + }; } } buffer @@ -426,8 +416,8 @@ impl Executor for MaterializeExecutor { impl std::fmt::Debug for MaterializeExecutor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("MaterializeExecutor") - .field("input info", &self.info()) - .field("arrange_columns", &self.arrange_columns) + 
.field("info", &self.info()) + .field("arrange_key_indices", &self.arrange_key_indices) .finish() } } diff --git a/src/stream/src/executor/now.rs b/src/stream/src/executor/now.rs index d2cbf05d71f80..2ee5468ff5ad9 100644 --- a/src/stream/src/executor/now.rs +++ b/src/stream/src/executor/now.rs @@ -12,11 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::Bound; +use std::ops::Bound::Unbounded; + use futures::{pin_mut, StreamExt}; use futures_async_stream::try_stream; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::catalog::{Field, Schema}; -use risingwave_common::row; +use risingwave_common::row::{self, OwnedRow}; use risingwave_common::types::{DataType, Datum}; use risingwave_storage::StateStore; use tokio::sync::mpsc::UnboundedReceiver; @@ -79,9 +82,11 @@ impl NowExecutor { if !initialized { // Handle the first barrier. state_table.init_epoch(barrier.epoch); - let state_row = { - let data_iter = state_table.iter_row(Default::default()).await?; + let sub_range: &(Bound, Bound) = &(Unbounded, Unbounded); + let data_iter = state_table + .iter_with_prefix(row::empty(), sub_range, Default::default()) + .await?; pin_mut!(data_iter); if let Some(keyed_row) = data_iter.next().await { Some(keyed_row?) @@ -90,6 +95,7 @@ impl NowExecutor { } }; last_timestamp = state_row.and_then(|row| row[0].clone()); + paused = barrier.is_pause_on_startup(); initialized = true; } else if paused { // Assert that no data is updated. @@ -104,7 +110,7 @@ impl NowExecutor { // Update paused state. if let Some(mutation) = barrier.mutation.as_deref() { match mutation { - Mutation::Pause | Mutation::Update { .. 
} => paused = true, + Mutation::Pause => paused = true, Mutation::Resume => paused = false, _ => {} } diff --git a/src/stream/src/executor/over_window/eowc.rs b/src/stream/src/executor/over_window/eowc.rs index 22553641369c1..b5da45edd47e5 100644 --- a/src/stream/src/executor/over_window/eowc.rs +++ b/src/stream/src/executor/over_window/eowc.rs @@ -13,6 +13,7 @@ // limitations under the License. use std::marker::PhantomData; +use std::ops::Bound; use futures::StreamExt; use futures_async_stream::{for_await, try_stream}; @@ -195,10 +196,15 @@ impl EowcOverWindowExecutor { curr_row_buffer: Default::default(), }; + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); // Recover states from state table. let table_iter = this .state_table - .iter_row_with_pk_prefix(partition_key, PrefetchOptions::new_for_exhaust_iter()) + .iter_with_prefix( + partition_key, + sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) .await?; #[for_await] @@ -352,11 +358,7 @@ impl EowcOverWindowExecutor { let columns: Vec = builders.into_iter().map(|b| b.finish().into()).collect(); let chunk_size = columns[0].len(); Ok(if chunk_size > 0 { - Some(StreamChunk::new( - vec![Op::Insert; chunk_size], - columns, - None, - )) + Some(StreamChunk::new(vec![Op::Insert; chunk_size], columns)) } else { None }) diff --git a/src/stream/src/executor/over_window/general.rs b/src/stream/src/executor/over_window/general.rs index 091e199d7b52a..9e66835b54b05 100644 --- a/src/stream/src/executor/over_window/general.rs +++ b/src/stream/src/executor/over_window/general.rs @@ -21,7 +21,7 @@ use futures::StreamExt; use futures_async_stream::try_stream; use itertools::Itertools; use risingwave_common::array::stream_record::Record; -use risingwave_common::array::{RowRef, StreamChunk}; +use risingwave_common::array::{Op, RowRef, StreamChunk}; use risingwave_common::catalog::Field; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::session_config::OverWindowCachePolicy as 
CachePolicy; @@ -225,26 +225,25 @@ impl OverWindowExecutor { chunk: &'a StreamChunk, ) -> impl Iterator>> { let mut changes_merged = BTreeMap::new(); - for record in chunk.records() { - match record { - Record::Insert { new_row } => { - let pk = DefaultOrdered(this.get_input_pk(new_row)); + for (op, row) in chunk.rows() { + let pk = DefaultOrdered(this.get_input_pk(row)); + match op { + Op::Insert | Op::UpdateInsert => { if let Some(prev_change) = changes_merged.get_mut(&pk) { match prev_change { Record::Delete { old_row } => { *prev_change = Record::Update { old_row: *old_row, - new_row, + new_row: row, }; } _ => panic!("inconsistent changes in input chunk"), } } else { - changes_merged.insert(pk, record); + changes_merged.insert(pk, Record::Insert { new_row: row }); } } - Record::Delete { old_row } => { - let pk = DefaultOrdered(this.get_input_pk(old_row)); + Op::Delete | Op::UpdateDelete => { if let Some(prev_change) = changes_merged.get_mut(&pk) { match prev_change { Record::Insert { .. } => { @@ -261,29 +260,7 @@ impl OverWindowExecutor { _ => panic!("inconsistent changes in input chunk"), } } else { - changes_merged.insert(pk, record); - } - } - Record::Update { old_row, new_row } => { - let pk = DefaultOrdered(this.get_input_pk(old_row)); - if let Some(prev_change) = changes_merged.get_mut(&pk) { - match prev_change { - Record::Insert { .. } => { - *prev_change = Record::Insert { new_row }; - } - Record::Update { - old_row: real_old_row, - .. 
- } => { - *prev_change = Record::Update { - old_row: *real_old_row, - new_row, - }; - } - _ => panic!("inconsistent changes in input chunk"), - } - } else { - changes_merged.insert(pk, record); + changes_merged.insert(pk, Record::Delete { old_row: row }); } } } @@ -368,7 +345,6 @@ impl OverWindowExecutor { &mut cache, this.cache_policy, &this.calls, - &this.partition_key_indices, &this.order_key_data_types, &this.order_key_order_types, &this.order_key_indices, diff --git a/src/stream/src/executor/over_window/over_partition.rs b/src/stream/src/executor/over_window/over_partition.rs index ab785acd9b681..42529a1c80587 100644 --- a/src/stream/src/executor/over_window/over_partition.rs +++ b/src/stream/src/executor/over_window/over_partition.rs @@ -19,11 +19,8 @@ use std::collections::{BTreeMap, HashSet, VecDeque}; use std::marker::PhantomData; use std::ops::{Bound, RangeInclusive}; -use futures::stream::select_all; -use futures::{stream, StreamExt, TryStreamExt}; use futures_async_stream::for_await; use risingwave_common::array::stream_record::Record; -use risingwave_common::hash::VnodeBitmapExt; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::session_config::OverWindowCachePolicy as CachePolicy; use risingwave_common::types::DataType; @@ -31,7 +28,6 @@ use risingwave_common::util::memcmp_encoding; use risingwave_common::util::sort_util::OrderType; use risingwave_expr::window_function::{FrameBounds, StateKey, WindowFuncCall}; use risingwave_storage::store::PrefetchOptions; -use risingwave_storage::table::merge_sort::merge_sort; use risingwave_storage::StateStore; use super::delta_btree_map::Change; @@ -75,7 +71,7 @@ pub(super) fn shrink_partition_cache( cache_policy: CachePolicy, recently_accessed_range: RangeInclusive, ) { - tracing::debug!( + tracing::trace!( this_partition_key=?this_partition_key, cache_policy=?cache_policy, recently_accessed_range=?recently_accessed_range, @@ -199,7 +195,7 @@ pub(super) fn shrink_partition_cache( } }; - 
tracing::debug!( + tracing::trace!( this_partition_key=?this_partition_key, retain_range=?(&start..=&end), "retain range in the range cache" @@ -230,12 +226,11 @@ pub(super) struct OverPartition<'a, S: StateStore> { cache_policy: CachePolicy, calls: &'a [WindowFuncCall], - partition_key_indices: &'a [usize], order_key_data_types: &'a [DataType], order_key_order_types: &'a [OrderType], order_key_indices: &'a [usize], input_pk_indices: &'a [usize], - state_key_to_table_pk_proj: Vec, + state_key_to_table_sub_pk_proj: Vec, _phantom: PhantomData, } @@ -248,20 +243,16 @@ impl<'a, S: StateStore> OverPartition<'a, S> { cache: &'a mut PartitionCache, cache_policy: CachePolicy, calls: &'a [WindowFuncCall], - partition_key_indices: &'a [usize], order_key_data_types: &'a [DataType], order_key_order_types: &'a [OrderType], order_key_indices: &'a [usize], input_pk_indices: &'a [usize], ) -> Self { // TODO(rc): move the calculation to executor? - let mut projection = Vec::with_capacity( - partition_key_indices.len() + order_key_indices.len() + input_pk_indices.len(), - ); + let mut projection = Vec::with_capacity(order_key_indices.len() + input_pk_indices.len()); let mut col_dedup = HashSet::new(); - for (proj_idx, key_idx) in partition_key_indices + for (proj_idx, key_idx) in order_key_indices .iter() - .chain(order_key_indices.iter()) .chain(input_pk_indices.iter()) .enumerate() { @@ -277,12 +268,11 @@ impl<'a, S: StateStore> OverPartition<'a, S> { cache_policy, calls, - partition_key_indices, order_key_data_types, order_key_order_types, order_key_indices, input_pk_indices, - state_key_to_table_pk_proj: projection, + state_key_to_table_sub_pk_proj: projection, _phantom: PhantomData, } } @@ -431,16 +421,16 @@ impl<'a, S: StateStore> OverPartition<'a, S> { if left_reached_sentinel { // TODO(rc): should count cache miss for this, and also the below - tracing::debug!(partition=?self.this_partition_key, "partition cache left extension triggered"); + 
tracing::trace!(partition=?self.this_partition_key, "partition cache left extension triggered"); let left_most = self.cache_real_first_key().unwrap_or(delta_first).clone(); self.extend_cache_leftward_by_n(table, &left_most).await?; } if right_reached_sentinel { - tracing::debug!(partition=?self.this_partition_key, "partition cache right extension triggered"); + tracing::trace!(partition=?self.this_partition_key, "partition cache right extension triggered"); let right_most = self.cache_real_last_key().unwrap_or(delta_last).clone(); self.extend_cache_rightward_by_n(table, &right_most).await?; } - tracing::debug!(partition=?self.this_partition_key, "partition cache extended"); + tracing::trace!(partition=?self.this_partition_key, "partition cache extended"); } } @@ -453,12 +443,14 @@ impl<'a, S: StateStore> OverPartition<'a, S> { return Ok(()); } - tracing::debug!(partition=?self.this_partition_key, "loading the whole partition into cache"); + tracing::trace!(partition=?self.this_partition_key, "loading the whole partition into cache"); let mut new_cache = PartitionCache::new(); // shouldn't use `new_empty_partition_cache` here because we don't want sentinels + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); let table_iter = table - .iter_row_with_pk_prefix( + .iter_with_prefix( self.this_partition_key, + sub_range, PrefetchOptions::new_for_exhaust_iter(), ) .await?; @@ -506,17 +498,17 @@ impl<'a, S: StateStore> OverPartition<'a, S> { if self.cache_real_len() == 0 { // no normal entry in the cache, just load the given range - let table_pk_range = ( - Bound::Included(self.state_key_to_table_pk(range.start())?), - Bound::Included(self.state_key_to_table_pk(range.end())?), + let table_sub_range = ( + Bound::Included(self.state_key_to_table_sub_pk(range.start())?), + Bound::Included(self.state_key_to_table_sub_pk(range.end())?), ); tracing::debug!( partition=?self.this_partition_key, - table_pk_range=?table_pk_range, + 
table_sub_range=?table_sub_range, "cache is empty, just loading the given range" ); return self - .extend_cache_by_range_inner(table, table_pk_range) + .extend_cache_by_range_inner(table, table_sub_range) .await; } @@ -526,33 +518,33 @@ impl<'a, S: StateStore> OverPartition<'a, S> { if self.cache_left_is_sentinel() && *range.start() < cache_real_first_key { // extend leftward only if there's smallest sentinel - let table_pk_range = ( - Bound::Included(self.state_key_to_table_pk(range.start())?), - Bound::Excluded(self.state_key_to_table_pk(cache_real_first_key)?), + let table_sub_range = ( + Bound::Included(self.state_key_to_table_sub_pk(range.start())?), + Bound::Excluded(self.state_key_to_table_sub_pk(cache_real_first_key)?), ); - tracing::debug!( + tracing::trace!( partition=?self.this_partition_key, - table_pk_range=?table_pk_range, + table_sub_range=?table_sub_range, "loading the left half of given range" ); return self - .extend_cache_by_range_inner(table, table_pk_range) + .extend_cache_by_range_inner(table, table_sub_range) .await; } if self.cache_right_is_sentinel() && *range.end() > cache_real_last_key { // extend rightward only if there's largest sentinel - let table_pk_range = ( - Bound::Excluded(self.state_key_to_table_pk(cache_real_last_key)?), - Bound::Included(self.state_key_to_table_pk(range.end())?), + let table_sub_range = ( + Bound::Excluded(self.state_key_to_table_sub_pk(cache_real_last_key)?), + Bound::Included(self.state_key_to_table_sub_pk(range.end())?), ); - tracing::debug!( + tracing::trace!( partition=?self.this_partition_key, - table_pk_range=?table_pk_range, + table_sub_range=?table_sub_range, "loading the right half of given range" ); return self - .extend_cache_by_range_inner(table, table_pk_range) + .extend_cache_by_range_inner(table, table_sub_range) .await; } @@ -567,24 +559,18 @@ impl<'a, S: StateStore> OverPartition<'a, S> { async fn extend_cache_by_range_inner( &mut self, table: &StateTable, - table_pk_range: (Bound, Bound), + 
table_sub_range: (Bound, Bound), ) -> StreamExecutorResult<()> { - let streams = stream::iter(table.vnode_bitmap().iter_vnodes()) - .map(|vnode| { - table.iter_row_with_pk_range( - &table_pk_range, - vnode, - PrefetchOptions::new_for_exhaust_iter(), - ) - }) - .buffer_unordered(10) - .try_collect::>() - .await? - .into_iter() - .map(Box::pin); + let stream = table + .iter_with_prefix( + self.this_partition_key, + &table_sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) + .await?; #[for_await] - for row in select_all(streams) { + for row in stream { let row: OwnedRow = row?.into_owned_row(); let key = self.row_to_state_key(&row)?; self.range_cache.insert(CacheKey::from(key), row); @@ -645,25 +631,20 @@ impl<'a, S: StateStore> OverPartition<'a, S> { ) -> StreamExecutorResult<()> { let mut to_extend: VecDeque = VecDeque::with_capacity(MAGIC_BATCH_SIZE); { - let pk_range = ( - Bound::Included(self.this_partition_key.into_owned_row()), - Bound::Excluded(self.state_key_to_table_pk(range_to_exclusive)?), + let sub_range = ( + Bound::::Unbounded, + Bound::Excluded(self.state_key_to_table_sub_pk(range_to_exclusive)?), ); - let streams: Vec<_> = - futures::future::try_join_all(table.vnode_bitmap().iter_vnodes().map(|vnode| { - table.iter_row_with_pk_range( - &pk_range, - vnode, - PrefetchOptions::new_for_exhaust_iter(), - ) - })) - .await? 
- .into_iter() - .map(Box::pin) - .collect(); + let stream = table + .iter_with_prefix( + self.this_partition_key, + &sub_range, + PrefetchOptions::new_for_exhaust_iter(), + ) + .await?; #[for_await] - for row in merge_sort(streams) { + for row in stream { let row: OwnedRow = row?.into_owned_row(); // For leftward extension, we now must iterate the table in order from the beginning @@ -741,33 +722,22 @@ impl<'a, S: StateStore> OverPartition<'a, S> { ) -> StreamExecutorResult<()> { let mut n_extended = 0usize; { - let pk_range = ( - Bound::Excluded(self.state_key_to_table_pk(range_from_exclusive)?), - // currently we can't get the first possible key after this partition, so use - // `Unbounded` plus manual check for workaround + let sub_range = ( + Bound::Excluded(self.state_key_to_table_sub_pk(range_from_exclusive)?), Bound::::Unbounded, ); - let streams: Vec<_> = - futures::future::try_join_all(table.vnode_bitmap().iter_vnodes().map(|vnode| { - table.iter_row_with_pk_range(&pk_range, vnode, PrefetchOptions::default()) - })) - .await? - .into_iter() - .map(Box::pin) - .collect(); + let stream = table + .iter_with_prefix( + self.this_partition_key, + &sub_range, + PrefetchOptions::default(), + ) + .await?; #[for_await] - for row in merge_sort(streams) { + for row in stream { let row: OwnedRow = row?.into_owned_row(); - if !Row::eq( - self.this_partition_key, - (&row).project(self.partition_key_indices), - ) { - // we've reached the end of this partition - break; - } - let key = self.row_to_state_key(&row)?; self.range_cache.insert(CacheKey::from(key), row); @@ -786,17 +756,16 @@ impl<'a, S: StateStore> OverPartition<'a, S> { Ok(()) } - fn state_key_to_table_pk(&self, key: &StateKey) -> StreamExecutorResult { - Ok(self - .this_partition_key - .chain(memcmp_encoding::decode_row( - &key.order_key, - self.order_key_data_types, - self.order_key_order_types, - )?) 
- .chain(key.pk.as_inner()) - .project(&self.state_key_to_table_pk_proj) - .into_owned_row()) + /// Convert [`StateKey`] to sub pk (pk without partition key) as [`OwnedRow`]. + fn state_key_to_table_sub_pk(&self, key: &StateKey) -> StreamExecutorResult { + Ok(memcmp_encoding::decode_row( + &key.order_key, + self.order_key_data_types, + self.order_key_order_types, + )? + .chain(key.pk.as_inner()) + .project(&self.state_key_to_table_sub_pk_proj) + .into_owned_row()) } fn row_to_state_key(&self, full_row: impl Row + Copy) -> StreamExecutorResult { @@ -986,7 +955,7 @@ mod find_affected_ranges_tests { use itertools::Itertools; use risingwave_common::types::{DataType, ScalarImpl}; - use risingwave_expr::agg::{AggArgs, AggKind}; + use risingwave_expr::aggregate::{AggArgs, AggKind}; use risingwave_expr::window_function::{Frame, FrameBound, WindowFuncKind}; use super::*; diff --git a/src/stream/src/executor/project.rs b/src/stream/src/executor/project.rs index c13c9a86ca78e..8cfebfecd3f33 100644 --- a/src/stream/src/executor/project.rs +++ b/src/stream/src/executor/project.rs @@ -21,7 +21,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::row::{Row, RowExt}; use risingwave_common::types::ToOwnedDatum; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use super::*; @@ -34,11 +34,11 @@ pub struct ProjectExecutor { } struct Inner { - ctx: ActorContextRef, + _ctx: ActorContextRef, info: ExecutorInfo, /// Expressions of the current projection. - exprs: Vec, + exprs: Vec, /// All the watermark derivations, (input_column_index, output_column_index). And the /// derivation expression is the project's expression itself. 
watermark_derivations: MultiMap, @@ -58,7 +58,7 @@ impl ProjectExecutor { ctx: ActorContextRef, input: Box, pk_indices: PkIndices, - exprs: Vec, + exprs: Vec, executor_id: u64, watermark_derivations: MultiMap, nondecreasing_expr_indices: Vec, @@ -82,7 +82,7 @@ impl ProjectExecutor { Self { input, inner: Inner { - ctx, + _ctx: ctx, info: ExecutorInfo { schema, pk_indices: info.pk_indices, @@ -138,16 +138,11 @@ impl Inner { let mut projected_columns = Vec::new(); for expr in &self.exprs { - let evaluated_expr = expr - .eval_infallible(&data_chunk, |err| { - self.ctx.on_compute_error(err, &self.info.identity) - }) - .await; + let evaluated_expr = expr.eval_infallible(&data_chunk).await; projected_columns.push(evaluated_expr); } let (_, vis) = data_chunk.into_parts(); - let vis = vis.into_visibility(); - let new_chunk = StreamChunk::new(ops, projected_columns, vis); + let new_chunk = StreamChunk::with_visibility(ops, projected_columns, vis); Ok(Some(new_chunk)) } @@ -161,12 +156,7 @@ impl Inner { let out_col_idx = *out_col_idx; let derived_watermark = watermark .clone() - .transform_with_expr(&self.exprs[out_col_idx], out_col_idx, |err| { - self.ctx.on_compute_error( - err, - &(self.info.identity.to_string() + "(when computing watermark)"), - ) - }) + .transform_with_expr(&self.exprs[out_col_idx], out_col_idx) .await; if let Some(derived_watermark) = derived_watermark { ret.push(derived_watermark); @@ -243,11 +233,12 @@ mod tests { use risingwave_common::array::{DataChunk, StreamChunk}; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::{DataType, Datum}; - use risingwave_expr::expr::{self, build_from_pretty, Expression, ValueImpl}; + use risingwave_expr::expr::{self, Expression, ValueImpl}; use super::super::test_utils::MockSource; use super::super::*; use super::*; + use crate::executor::test_utils::expr::build_from_pretty; use crate::executor::test_utils::StreamExecutorTestExt; #[tokio::test] @@ -269,14 +260,15 @@ mod tests { 
Field::unnamed(DataType::Int64), ], }; - let (mut tx, source) = MockSource::channel(schema, PkIndices::new()); + let pk_indices = vec![0]; + let (mut tx, source) = MockSource::channel(schema, pk_indices.clone()); let test_expr = build_from_pretty("(add:int8 $0:int8 $1:int8)"); let project = Box::new(ProjectExecutor::new( ActorContext::create(123), Box::new(source), - vec![], + pk_indices, vec![test_expr], 1, MultiMap::new(), @@ -354,7 +346,7 @@ mod tests { let a_expr = build_from_pretty("(add:int8 $0:int8 1:int8)"); let b_expr = build_from_pretty("(subtract:int8 $0:int8 1:int8)"); - let c_expr = DummyNondecreasingExpr.boxed(); + let c_expr = NonStrictExpression::for_test(DummyNondecreasingExpr); let project = Box::new(ProjectExecutor::new( ActorContext::create(123), diff --git a/src/stream/src/executor/project_set.rs b/src/stream/src/executor/project_set.rs index f1962d456b2e1..ff3214db88eaa 100644 --- a/src/stream/src/executor/project_set.rs +++ b/src/stream/src/executor/project_set.rs @@ -24,6 +24,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::row::{Row, RowExt}; use risingwave_common::types::{DataType, Datum, DatumRef, ToOwnedDatum}; use risingwave_common::util::iter_util::ZipEqFast; +use risingwave_expr::expr::{LogReport, NonStrictExpression}; use risingwave_expr::table_function::ProjectSetSelectItem; use super::error::StreamExecutorError; @@ -45,7 +46,7 @@ pub struct ProjectSetExecutor { struct Inner { info: ExecutorInfo, - ctx: ActorContextRef, + _ctx: ActorContextRef, /// Expressions of the current project_section. 
select_list: Vec, chunk_size: usize, @@ -83,7 +84,7 @@ impl ProjectSetExecutor { let inner = Inner { info, - ctx, + _ctx: ctx, select_list, chunk_size, watermark_derivations, @@ -260,12 +261,11 @@ impl Inner { ProjectSetSelectItem::Expr(expr) => { watermark .clone() - .transform_with_expr(expr, expr_idx + PROJ_ROW_ID_OFFSET, |err| { - self.ctx.on_compute_error( - err, - &(self.info.identity.to_string() + "(when computing watermark)"), - ) - }) + .transform_with_expr( + // TODO: should we build `expr` in non-strict mode? + &NonStrictExpression::new_topmost(expr, LogReport), + expr_idx + PROJ_ROW_ID_OFFSET, + ) .await } ProjectSetSelectItem::TableFunction(_) => { diff --git a/src/stream/src/executor/rearranged_chain.rs b/src/stream/src/executor/rearranged_chain.rs index 1ad43de432551..d2aaae9fd5025 100644 --- a/src/stream/src/executor/rearranged_chain.rs +++ b/src/stream/src/executor/rearranged_chain.rs @@ -135,6 +135,8 @@ impl RearrangedChainExecutor { .unbounded_send(RearrangedMessage::PhantomBarrier(first_barrier)) .unwrap(); + let mut processed_rows: u64 = 0; + { // 3. Rearrange stream, will yield the barriers polled from upstream to rearrange. let rearranged_barrier = @@ -162,8 +164,6 @@ impl RearrangedChainExecutor { let mut last_rearranged_epoch = create_epoch; let mut stop_rearrange_tx = Some(stop_rearrange_tx); - let mut processed_rows: u64 = 0; - #[for_await] for rearranged_msg in &mut rearranged { match rearranged_msg? 
{ @@ -223,7 +223,7 @@ impl RearrangedChainExecutor { continue; }; if let Some(barrier) = msg.as_barrier() { - self.progress.finish(barrier.epoch.curr); + self.progress.finish(barrier.epoch.curr, processed_rows); } yield msg; } @@ -236,7 +236,7 @@ impl RearrangedChainExecutor { for msg in upstream { let msg: Message = msg?; if let Some(barrier) = msg.as_barrier() { - self.progress.finish(barrier.epoch.curr); + self.progress.finish(barrier.epoch.curr, processed_rows); } yield msg; } diff --git a/src/stream/src/executor/receiver.rs b/src/stream/src/executor/receiver.rs index 1c0e7c6399a56..5b96cf6f9f8d8 100644 --- a/src/stream/src/executor/receiver.rs +++ b/src/stream/src/executor/receiver.rs @@ -116,6 +116,7 @@ impl Executor for ReceiverExecutor { fn execute(mut self: Box) -> BoxedMessageStream { let actor_id = self.actor_context.id; let actor_id_str = actor_id.to_string(); + let fragment_id_str = self.fragment_id.to_string(); let mut upstream_fragment_id_str = self.upstream_fragment_id.to_string(); let stream = #[try_stream] @@ -124,7 +125,11 @@ impl Executor for ReceiverExecutor { while let Some(msg) = self.input.next().await { self.metrics .actor_input_buffer_blocking_duration_ns - .with_label_values(&[&actor_id_str, &upstream_fragment_id_str]) + .with_label_values(&[ + &actor_id_str, + &fragment_id_str, + &upstream_fragment_id_str, + ]) .inc_by(start_time.elapsed().as_nanos() as u64); let mut msg: Message = msg?; @@ -135,11 +140,11 @@ impl Executor for ReceiverExecutor { Message::Chunk(chunk) => { self.metrics .actor_in_record_cnt - .with_label_values(&[&actor_id_str]) + .with_label_values(&[&actor_id_str, &fragment_id_str]) .inc_by(chunk.cardinality() as _); } Message::Barrier(barrier) => { - tracing::trace!( + tracing::debug!( target: "events::stream::barrier::path", actor_id = actor_id, "receiver receives barrier from path: {:?}", diff --git a/src/stream/src/executor/row_id_gen.rs b/src/stream/src/executor/row_id_gen.rs index c8502c770cea7..88a11f03c663b 
100644 --- a/src/stream/src/executor/row_id_gen.rs +++ b/src/stream/src/executor/row_id_gen.rs @@ -107,7 +107,7 @@ impl RowIdGenExecutor { let (ops, mut columns, bitmap) = chunk.into_inner(); columns[self.row_id_index] = self.gen_row_id_column_by_op(&columns[self.row_id_index], &ops); - yield Message::Chunk(StreamChunk::new(ops, columns, bitmap.into_visibility())); + yield Message::Chunk(StreamChunk::with_visibility(ops, columns, bitmap)); } Message::Barrier(barrier) => { // Update row id generator if vnode mapping changes. diff --git a/src/stream/src/executor/simple_agg.rs b/src/stream/src/executor/simple_agg.rs index a3109db69b4e3..6e88241f48433 100644 --- a/src/stream/src/executor/simple_agg.rs +++ b/src/stream/src/executor/simple_agg.rs @@ -17,7 +17,7 @@ use futures_async_stream::try_stream; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{build, AggCall, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{build_retractable, AggCall, BoxedAggregateFunction}; use risingwave_storage::StateStore; use super::agg_common::{AggExecutorArgs, SimpleAggExecutorExtraArgs}; @@ -73,10 +73,10 @@ struct ExecutorInner { /// State storage for each agg calls. storages: Vec>, - /// State table for the previous result of all agg calls. - /// The outputs of all managed agg states are collected and stored in this + /// Intermediate state table for value-state agg calls. + /// The state of all value-state aggregates are collected and stored in this /// table when `flush_data` is called. - result_table: StateTable, + intermediate_state_table: StateTable, /// State tables for deduplicating rows on distinct key for distinct agg calls. /// One table per distinct column (may be shared by multiple agg calls). 
@@ -95,11 +95,7 @@ impl ExecutorInner { fn all_state_tables_mut(&mut self) -> impl Iterator> { iter_table_storage(&mut self.storages) .chain(self.distinct_dedup_tables.values_mut()) - .chain(std::iter::once(&mut self.result_table)) - } - - fn all_state_tables_except_result_mut(&mut self) -> impl Iterator> { - iter_table_storage(&mut self.storages).chain(self.distinct_dedup_tables.values_mut()) + .chain(std::iter::once(&mut self.intermediate_state_table)) } } @@ -147,11 +143,11 @@ impl SimpleAggExecutor { }, input_pk_indices: input_info.pk_indices, input_schema: input_info.schema, - agg_funcs: args.agg_calls.iter().map(build).try_collect()?, + agg_funcs: args.agg_calls.iter().map(build_retractable).try_collect()?, agg_calls: args.agg_calls, row_count_index: args.row_count_index, storages: args.storages, - result_table: args.result_table, + intermediate_state_table: args.intermediate_state_table, distinct_dedup_tables: args.distinct_dedup_tables, watermark_epoch: args.watermark_epoch, extreme_cache_size: args.extreme_cache_size, @@ -173,8 +169,7 @@ impl SimpleAggExecutor { // Calculate the row visibility for every agg call. let mut call_visibilities = Vec::with_capacity(this.agg_calls.len()); for agg_call in &this.agg_calls { - let vis = - agg_call_filter_res(&this.actor_ctx, &this.info.identity, agg_call, &chunk).await?; + let vis = agg_call_filter_res(agg_call, &chunk).await?; call_visibilities.push(vis); } @@ -220,30 +215,22 @@ impl SimpleAggExecutor { vars.distinct_dedup .flush(&mut this.distinct_dedup_tables, this.actor_ctx.clone())?; - // Commit all state tables except for result table. + // Flush states into intermediate state table. + let encoded_states = vars.agg_group.encode_states(&this.agg_funcs)?; + this.intermediate_state_table + .update_without_old_value(encoded_states); + + // Commit all state tables. 
futures::future::try_join_all( - this.all_state_tables_except_result_mut() - .map(|table| table.commit(epoch)), + this.all_state_tables_mut().map(|table| table.commit(epoch)), ) .await?; // Retrieve modified states and put the changes into the builders. - match vars - .agg_group + vars.agg_group .build_change(&this.storages, &this.agg_funcs) .await? - { - Some(change) => { - this.result_table.write_record(change.as_ref()); - this.result_table.commit(epoch).await?; - Some(change.to_stream_chunk(&this.info.schema.data_types())) - } - None => { - // Agg result is not changed. - this.result_table.commit_no_data_expected(epoch); - None - } - } + .map(|change| change.to_stream_chunk(&this.info.schema.data_types())) } else { // No state is changed. // Call commit on state table to increment the epoch. @@ -271,13 +258,13 @@ impl SimpleAggExecutor { }); let mut vars = ExecutionVars { - // Create `AggGroup`. This will fetch previous agg result from the result table. + // This will fetch previous agg states from the intermediate state table. agg_group: AggGroup::create( None, &this.agg_calls, &this.agg_funcs, &this.storages, - &this.result_table, + &this.intermediate_state_table, &this.input_pk_indices, this.row_count_index, this.extreme_cache_size, @@ -330,7 +317,7 @@ mod tests { use risingwave_common::array::stream_chunk::StreamChunkTestExt; use risingwave_common::catalog::Field; use risingwave_common::types::*; - use risingwave_expr::agg::AggCall; + use risingwave_expr::aggregate::AggCall; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::StateStore; diff --git a/src/stream/src/executor/sink.rs b/src/stream/src/executor/sink.rs index 375c3587a8650..70e63b4b33cd0 100644 --- a/src/stream/src/executor/sink.rs +++ b/src/stream/src/executor/sink.rs @@ -12,36 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::sync::Arc; -use std::time::Instant; +use std::mem; +use anyhow::anyhow; use futures::stream::select; use futures::{FutureExt, StreamExt}; use futures_async_stream::try_stream; use itertools::Itertools; -use prometheus::Histogram; -use risingwave_common::array::{Op, StreamChunk}; +use risingwave_common::array::stream_chunk::StreamChunkMut; +use risingwave_common::array::{merge_chunk_row, Op, StreamChunk, StreamChunkCompactor}; use risingwave_common::catalog::{ColumnCatalog, Field, Schema}; -use risingwave_common::types::DataType; -use risingwave_common::util::epoch::EpochPair; use risingwave_connector::dispatch_sink; -use risingwave_connector::sink::catalog::SinkType; +use risingwave_connector::sink::catalog::{SinkId, SinkType}; +use risingwave_connector::sink::log_store::{ + LogReader, LogReaderExt, LogStoreFactory, LogWriter, LogWriterExt, +}; use risingwave_connector::sink::{ - build_sink, Sink, SinkImpl, SinkParam, SinkWriter, SinkWriterParam, + build_sink, LogSinker, Sink, SinkImpl, SinkParam, SinkWriterParam, }; use super::error::{StreamExecutorError, StreamExecutorResult}; -use super::{BoxedExecutor, Executor, Message}; -use crate::common::log_store::{LogReader, LogStoreFactory, LogStoreReadItem, LogWriter}; -use crate::common::StreamChunkBuilder; -use crate::executor::monitor::StreamingMetrics; +use super::{BoxedExecutor, Executor, Message, PkIndices}; use crate::executor::{expect_first_barrier, ActorContextRef, BoxedMessageStream}; pub struct SinkExecutor { input: BoxedExecutor, - metrics: Arc, sink: SinkImpl, identity: String, + pk_indices: PkIndices, input_columns: Vec, input_schema: Schema, sink_param: SinkParam, @@ -51,32 +49,42 @@ pub struct SinkExecutor { sink_writer_param: SinkWriterParam, } -struct SinkMetrics { - sink_commit_duration_metrics: Histogram, +// Drop all the DELETE messages in this chunk and convert UPDATE INSERT into INSERT. 
+fn force_append_only(c: StreamChunk) -> StreamChunk { + let mut c: StreamChunkMut = c.into(); + for (_, mut r) in c.to_rows_mut() { + match r.op() { + Op::Insert => {} + Op::Delete | Op::UpdateDelete => r.set_vis(false), + Op::UpdateInsert => r.set_op(Op::Insert), + } + } + c.into() } -// Drop all the DELETE messages in this chunk and convert UPDATE INSERT into INSERT. -fn force_append_only(chunk: StreamChunk, data_types: Vec) -> Option { - let mut builder = StreamChunkBuilder::new(chunk.cardinality() + 1, data_types); - for (op, row_ref) in chunk.rows() { - if op == Op::Insert || op == Op::UpdateInsert { - let none = builder.append_row(Op::Insert, row_ref); - assert!(none.is_none()); +// Drop all the INSERT messages in this chunk and convert UPDATE DELETE into DELETE. +fn force_delete_only(c: StreamChunk) -> StreamChunk { + let mut c: StreamChunkMut = c.into(); + for (_, mut r) in c.to_rows_mut() { + match r.op() { + Op::Delete => {} + Op::Insert | Op::UpdateInsert => r.set_vis(false), + Op::UpdateDelete => r.set_op(Op::Delete), } } - builder.take() + c.into() } impl SinkExecutor { #[allow(clippy::too_many_arguments)] pub async fn new( input: BoxedExecutor, - metrics: Arc, sink_writer_param: SinkWriterParam, sink_param: SinkParam, columns: Vec, actor_context: ActorContextRef, log_store_factory: F, + pk_indices: PkIndices, ) -> StreamExecutorResult { let (log_reader, log_writer) = log_store_factory.build().await; @@ -87,9 +95,9 @@ impl SinkExecutor { .collect(); Ok(Self { input, - metrics, sink, identity: format!("SinkExecutor {:X?}", sink_writer_param.executor_id), + pk_indices, input_columns: columns, input_schema, sink_param, @@ -101,21 +109,23 @@ impl SinkExecutor { } fn execute_inner(self) -> BoxedMessageStream { - let sink_commit_duration_metrics = self - .metrics - .sink_commit_duration - .with_label_values(&[self.identity.as_str(), self.sink.get_connector()]); + let stream_key = self.pk_indices; - let sink_metrics = SinkMetrics { - 
sink_commit_duration_metrics, + let stream_key_sink_pk_mismatch = { + stream_key + .iter() + .any(|i| !self.sink_param.downstream_pk.contains(i)) }; let write_log_stream = Self::execute_write_log( self.input, - self.log_writer, - self.input_columns.clone(), + stream_key, + self.log_writer + .monitored(self.sink_writer_param.sink_metrics.clone()), + self.sink_param.sink_id, self.sink_param.sink_type, self.actor_context, + stream_key_sink_pk_mismatch, ); dispatch_sink!(self.sink, sink, { @@ -123,7 +133,6 @@ impl SinkExecutor { sink, self.log_reader, self.input_columns, - sink_metrics, self.sink_writer_param, ); select(consume_log_stream.into_stream(), write_log_stream).boxed() @@ -133,58 +142,144 @@ impl SinkExecutor { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_write_log( input: BoxedExecutor, + stream_key: PkIndices, mut log_writer: impl LogWriter, - columns: Vec, + sink_id: SinkId, sink_type: SinkType, actor_context: ActorContextRef, + stream_key_sink_pk_mismatch: bool, ) { let mut input = input.execute(); - let data_types = columns - .iter() - .map(|col| col.column_desc.data_type.clone()) - .collect_vec(); - let barrier = expect_first_barrier(&mut input).await?; let epoch_pair = barrier.epoch; - log_writer - .init(EpochPair::new_test_epoch(epoch_pair.curr)) - .await?; + log_writer.init(epoch_pair).await?; // Propagate the first barrier yield Message::Barrier(barrier); - #[for_await] - for msg in input { - match msg? { - Message::Watermark(w) => yield Message::Watermark(w), - Message::Chunk(chunk) => { - let visible_chunk = if sink_type == SinkType::ForceAppendOnly { - // Force append-only by dropping UPDATE/DELETE messages. We do this when the - // user forces the sink to be append-only while it is actually not based on - // the frontend derivation result. 
- force_append_only(chunk.clone(), data_types.clone()) - } else { - Some(chunk.clone().compact()) - }; - - if let Some(chunk) = visible_chunk { + // for metrics + let sink_id_str = sink_id.to_string(); + let actor_id_str = actor_context.id.to_string(); + let fragment_id_str = actor_context.fragment_id.to_string(); + + // When stream key is different from the user defined primary key columns for sinks. The operations could be out of order + // stream key: a,b + // sink pk: a + + // original: + // (1,1) -> (1,2) + // (1,2) -> (1,3) + + // mv fragment 1: + // delete (1,1) + + // mv fragment 2: + // insert (1,2) + // delete (1,2) + + // mv fragment 3: + // insert (1,3) + + // merge to sink fragment: + // insert (1,3) + // insert (1,2) + // delete (1,2) + // delete (1,1) + // So we do additional compaction in the sink executor per barrier. + + // 1. compact all the chanes with the stream key. + // 2. sink all the delete events and then sink all insert evernt. + + // after compacting with the stream key, the two event with the same used defined sink pk must have different stream key. + // So the delete event is not to delete the inserted record in our internal streaming SQL semantic. + if stream_key_sink_pk_mismatch && sink_type != SinkType::AppendOnly { + let mut chunk_buffer = StreamChunkCompactor::new(stream_key.clone()); + let mut watermark = None; + #[for_await] + for msg in input { + match msg? 
{ + Message::Watermark(w) => watermark = Some(w), + Message::Chunk(c) => { + actor_context + .streaming_metrics + .sink_input_row_count + .with_label_values(&[&sink_id_str, &actor_id_str, &fragment_id_str]) + .inc_by(c.capacity() as u64); + + chunk_buffer.push_chunk(c); + } + Message::Barrier(barrier) => { + let mut delete_chunks = vec![]; + let mut insert_chunks = vec![]; + for c in mem::replace( + &mut chunk_buffer, + StreamChunkCompactor::new(stream_key.clone()), + ) + .into_compacted_chunks() + { + if sink_type != SinkType::ForceAppendOnly { + // Force append-only by dropping UPDATE/DELETE messages. We do this when the + // user forces the sink to be append-only while it is actually not based on + // the frontend derivation result. + delete_chunks.push(force_delete_only(c.clone())); + } + insert_chunks.push(force_append_only(c)); + } + + for c in delete_chunks.into_iter().chain(insert_chunks.into_iter()) { + log_writer.write_chunk(c.clone()).await?; + yield Message::Chunk(c); + } + if let Some(w) = mem::take(&mut watermark) { + yield Message::Watermark(w) + } + log_writer + .flush_current_epoch(barrier.epoch.curr, barrier.kind.is_checkpoint()) + .await?; + if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(actor_context.id) + { + log_writer.update_vnode_bitmap(vnode_bitmap); + } + yield Message::Barrier(barrier); + } + } + } + } else { + #[for_await] + for msg in input { + match msg? { + Message::Watermark(w) => yield Message::Watermark(w), + Message::Chunk(chunk) => { + // Compact the chunk to eliminate any useless intermediate result (e.g. UPDATE + // V->V). + let chunk = merge_chunk_row(chunk, &stream_key); + let chunk = if sink_type == SinkType::ForceAppendOnly { + // Force append-only by dropping UPDATE/DELETE messages. We do this when the + // user forces the sink to be append-only while it is actually not based on + // the frontend derivation result. 
+ force_append_only(chunk) + } else { + chunk + }; + log_writer.write_chunk(chunk.clone()).await?; // Use original chunk instead of the reordered one as the executor output. yield Message::Chunk(chunk); } - } - Message::Barrier(barrier) => { - log_writer - .flush_current_epoch(barrier.epoch.curr, barrier.kind.is_checkpoint()) - .await?; - if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(actor_context.id) { - log_writer.update_vnode_bitmap(vnode_bitmap); + Message::Barrier(barrier) => { + log_writer + .flush_current_epoch(barrier.epoch.curr, barrier.kind.is_checkpoint()) + .await?; + if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(actor_context.id) + { + log_writer.update_vnode_bitmap(vnode_bitmap); + } + yield Message::Barrier(barrier); } - yield Message::Barrier(barrier); } } } @@ -192,13 +287,12 @@ impl SinkExecutor { async fn execute_consume_log( sink: S, - mut log_reader: R, + log_reader: R, columns: Vec, - sink_metrics: SinkMetrics, sink_writer_param: SinkWriterParam, ) -> StreamExecutorResult { - log_reader.init().await?; - let mut sink_writer = sink.new_writer(sink_writer_param).await?; + let metrics = sink_writer_param.sink_metrics.clone(); + let log_sinker = sink.new_log_sinker(sink_writer_param).await?; let visible_columns = columns .iter() @@ -206,94 +300,20 @@ impl SinkExecutor { .filter_map(|(idx, column)| (!column.is_hidden).then_some(idx)) .collect_vec(); - #[derive(Debug)] - enum LogConsumerState { - /// Mark that the log consumer is not initialized yet - Uninitialized, - - /// Mark that a new epoch has begun. - EpochBegun { curr_epoch: u64 }, - - /// Mark that the consumer has just received a barrier - BarrierReceived { prev_epoch: u64 }, - } - - let mut state = LogConsumerState::Uninitialized; - - loop { - let (epoch, item): (u64, LogStoreReadItem) = log_reader.next_item().await?; - if let LogStoreReadItem::UpdateVnodeBitmap(_) = &item { - match &state { - LogConsumerState::BarrierReceived { .. 
} => {} - _ => unreachable!( - "update vnode bitmap can be accepted only right after \ - barrier, but current state is {:?}", - state - ), - } - } - // begin_epoch when not previously began - state = match state { - LogConsumerState::Uninitialized => { - sink_writer.begin_epoch(epoch).await?; - LogConsumerState::EpochBegun { curr_epoch: epoch } - } - LogConsumerState::EpochBegun { curr_epoch } => { - assert!( - epoch >= curr_epoch, - "new epoch {} should not be below the current epoch {}", - epoch, - curr_epoch - ); - LogConsumerState::EpochBegun { curr_epoch: epoch } - } - LogConsumerState::BarrierReceived { prev_epoch } => { - assert!( - epoch > prev_epoch, - "new epoch {} should be greater than prev epoch {}", - epoch, - prev_epoch - ); - sink_writer.begin_epoch(epoch).await?; - LogConsumerState::EpochBegun { curr_epoch: epoch } - } - }; - match item { - LogStoreReadItem::StreamChunk(chunk) => { - let chunk = if visible_columns.len() != columns.len() { - // Do projection here because we may have columns that aren't visible to - // the downstream. 
- chunk.project(&visible_columns) - } else { - chunk - }; - if let Err(e) = sink_writer.write_batch(chunk).await { - sink_writer.abort().await?; - return Err(e.into()); - } - } - LogStoreReadItem::Barrier { is_checkpoint } => { - if is_checkpoint { - let start_time = Instant::now(); - sink_writer.barrier(true).await?; - sink_metrics - .sink_commit_duration_metrics - .observe(start_time.elapsed().as_millis() as f64); - log_reader.truncate().await?; - } else { - sink_writer.barrier(false).await?; - } - let prev_epoch = match state { - LogConsumerState::EpochBegun { curr_epoch } => curr_epoch, - _ => unreachable!("epoch must have begun before handling barrier"), - }; - state = LogConsumerState::BarrierReceived { prev_epoch } - } - LogStoreReadItem::UpdateVnodeBitmap(vnode_bitmap) => { - sink_writer.update_vnode_bitmap(vnode_bitmap).await?; + let log_reader = log_reader + .transform_chunk(move |chunk| { + if visible_columns.len() != columns.len() { + // Do projection here because we may have columns that aren't visible to + // the downstream. 
+ chunk.project(&visible_columns) + } else { + chunk } - } - } + }) + .monitored(metrics); + + log_sinker.consume_log_and_sink(log_reader).await?; + Err(anyhow!("end of stream").into()) } } @@ -307,7 +327,7 @@ impl Executor for SinkExecutor { } fn pk_indices(&self) -> super::PkIndicesRef<'_> { - &self.sink_param.pk_indices + &self.pk_indices } fn identity(&self) -> &str { @@ -320,7 +340,7 @@ mod test { use risingwave_common::catalog::{ColumnDesc, ColumnId}; use super::*; - use crate::common::log_store::in_mem::BoundedInMemLogStoreFactory; + use crate::common::log_store_impl::in_mem::BoundedInMemLogStoreFactory; use crate::executor::test_utils::*; use crate::executor::ActorContext; @@ -391,20 +411,21 @@ mod test { .filter(|col| !col.is_hidden) .map(|col| col.column_desc.clone()) .collect(), - pk_indices: pk.clone(), + downstream_pk: pk.clone(), sink_type: SinkType::ForceAppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; let sink_executor = SinkExecutor::new( Box::new(mock), - Arc::new(StreamingMetrics::unused()), - SinkWriterParam::default(), + SinkWriterParam::for_test(), sink_param, columns.clone(), ActorContext::create(0), BoundedInMemLogStoreFactory::new(1), + pk, ) .await .unwrap(); @@ -416,7 +437,7 @@ mod test { let chunk_msg = executor.next().await.unwrap().unwrap(); assert_eq!( - chunk_msg.into_chunk().unwrap(), + chunk_msg.into_chunk().unwrap().compact(), StreamChunk::from_pretty( " I I I + 3 2 1", @@ -428,7 +449,7 @@ mod test { let chunk_msg = executor.next().await.unwrap().unwrap(); assert_eq!( - chunk_msg.into_chunk().unwrap(), + chunk_msg.into_chunk().unwrap().compact(), StreamChunk::from_pretty( " I I I + 3 4 1 @@ -443,6 +464,147 @@ mod test { executor.next().await.unwrap().unwrap(); } + #[tokio::test] + async fn stream_key_sink_pk_mismatch() { + use risingwave_common::array::stream_chunk::StreamChunk; + use risingwave_common::array::StreamChunkTestExt; + use risingwave_common::types::DataType; + + use 
crate::executor::Barrier; + + let properties = maplit::hashmap! { + "connector".into() => "blackhole".into(), + }; + + // We have two visible columns and one hidden column. The hidden column will be pruned out + // within the sink executor. + let columns = vec![ + ColumnCatalog { + column_desc: ColumnDesc::unnamed(ColumnId::new(0), DataType::Int64), + is_hidden: false, + }, + ColumnCatalog { + column_desc: ColumnDesc::unnamed(ColumnId::new(1), DataType::Int64), + is_hidden: false, + }, + ColumnCatalog { + column_desc: ColumnDesc::unnamed(ColumnId::new(2), DataType::Int64), + is_hidden: true, + }, + ]; + let schema: Schema = columns + .iter() + .map(|column| Field::from(column.column_desc.clone())) + .collect(); + + let mock = MockSource::with_messages( + schema, + vec![0, 1], + vec![ + Message::Barrier(Barrier::new_test_barrier(1)), + Message::Chunk(std::mem::take(&mut StreamChunk::from_pretty( + " I I I + + 1 1 10", + ))), + Message::Barrier(Barrier::new_test_barrier(2)), + Message::Chunk(std::mem::take(&mut StreamChunk::from_pretty( + " I I I + + 1 3 30", + ))), + Message::Chunk(std::mem::take(&mut StreamChunk::from_pretty( + " I I I + + 1 2 20 + - 1 2 20", + ))), + Message::Chunk(std::mem::take(&mut StreamChunk::from_pretty( + " I I I + - 1 1 10", + ))), + Message::Barrier(Barrier::new_test_barrier(3)), + ], + ); + + let sink_param = SinkParam { + sink_id: 0.into(), + properties, + columns: columns + .iter() + .filter(|col| !col.is_hidden) + .map(|col| col.column_desc.clone()) + .collect(), + downstream_pk: vec![0], + sink_type: SinkType::Upsert, + format_desc: None, + db_name: "test".into(), + sink_from_name: "test".into(), + }; + + let sink_executor = SinkExecutor::new( + Box::new(mock), + SinkWriterParam::for_test(), + sink_param, + columns.clone(), + ActorContext::create(0), + BoundedInMemLogStoreFactory::new(1), + vec![0, 1], + ) + .await + .unwrap(); + + let mut executor = SinkExecutor::execute(Box::new(sink_executor)); + + // Barrier message. 
+ executor.next().await.unwrap().unwrap(); + + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!(chunk_msg.into_chunk().unwrap().cardinality(), 0); + + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!( + chunk_msg.into_chunk().unwrap().compact(), + StreamChunk::from_pretty( + " I I I + + 1 1 10", + ) + ); + + // Barrier message. + executor.next().await.unwrap().unwrap(); + + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!(chunk_msg.into_chunk().unwrap().cardinality(), 0); + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!(chunk_msg.into_chunk().unwrap().cardinality(), 0); + + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!( + chunk_msg.into_chunk().unwrap().compact(), + StreamChunk::from_pretty( + " I I I + - 1 1 10", + ) + ); + + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!( + chunk_msg.into_chunk().unwrap().compact(), + StreamChunk::from_pretty( + " I I I + + 1 3 30", + ) + ); + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!(chunk_msg.into_chunk().unwrap().cardinality(), 0); + let chunk_msg = executor.next().await.unwrap().unwrap(); + assert_eq!(chunk_msg.into_chunk().unwrap().cardinality(), 0); + + // Should not receive the third stream chunk message because the force-append-only sink + // executor will drop all DELETE messages. + + // The last barrier message. 
+ executor.next().await.unwrap().unwrap(); + } + #[tokio::test] async fn test_empty_barrier_sink() { use risingwave_common::types::DataType; @@ -488,20 +650,21 @@ mod test { .filter(|col| !col.is_hidden) .map(|col| col.column_desc.clone()) .collect(), - pk_indices: pk.clone(), + downstream_pk: pk.clone(), sink_type: SinkType::ForceAppendOnly, + format_desc: None, db_name: "test".into(), sink_from_name: "test".into(), }; let sink_executor = SinkExecutor::new( Box::new(mock), - Arc::new(StreamingMetrics::unused()), - SinkWriterParam::default(), + SinkWriterParam::for_test(), sink_param, columns, ActorContext::create(0), BoundedInMemLogStoreFactory::new(1), + pk, ) .await .unwrap(); diff --git a/src/stream/src/executor/sort_buffer.rs b/src/stream/src/executor/sort_buffer.rs index 2a4cd4bf6ad78..a1d6e3286ed5f 100644 --- a/src/stream/src/executor/sort_buffer.rs +++ b/src/stream/src/executor/sort_buffer.rs @@ -118,6 +118,18 @@ impl SortBuffer { self.cache.insert(key, new_row.into_owned_row()); } + /// Update a row in the buffer without giving the old value. + pub fn update_without_old_value( + &mut self, + new_row: impl Row, + buffer_table: &mut StateTable, + ) { + buffer_table.update_without_old_value(&new_row); + let key = row_to_cache_key(self.sort_column_index, &new_row, buffer_table); + self.cache.delete(&key); + self.cache.insert(key, new_row.into_owned_row()); + } + /// Apply a change to the buffer, insert/delete/update. 
pub fn apply_change(&mut self, change: Record, buffer_table: &mut StateTable) { match change { @@ -201,9 +213,9 @@ impl SortBuffer { let streams: Vec<_> = futures::future::try_join_all(buffer_table.vnode_bitmap().iter_vnodes().map(|vnode| { - buffer_table.iter_row_with_pk_range( - &pk_range, + buffer_table.iter_with_vnode( vnode, + &pk_range, PrefetchOptions::new_with_exhaust_iter(filler.capacity().is_none()), ) })) diff --git a/src/stream/src/executor/source/fetch_executor.rs b/src/stream/src/executor/source/fetch_executor.rs new file mode 100644 index 0000000000000..3e7ea84bcfcce --- /dev/null +++ b/src/stream/src/executor/source/fetch_executor.rs @@ -0,0 +1,350 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::{Debug, Formatter}; +use std::ops::Bound; +use std::sync::Arc; + +use either::Either; +use futures::pin_mut; +use futures::stream::{self, StreamExt}; +use futures_async_stream::try_stream; +use risingwave_common::catalog::{ColumnId, Schema, TableId}; +use risingwave_common::hash::VnodeBitmapExt; +use risingwave_common::row::{OwnedRow, Row}; +use risingwave_common::types::{ScalarRef, ScalarRefImpl}; +use risingwave_connector::source::filesystem::FsSplit; +use risingwave_connector::source::{ + BoxSourceWithStateStream, SourceContext, SourceCtrlOpts, SplitImpl, SplitMetaData, + StreamChunkWithState, +}; +use risingwave_connector::ConnectorParams; +use risingwave_source::source_desc::SourceDesc; +use risingwave_storage::store::PrefetchOptions; +use risingwave_storage::StateStore; + +use crate::executor::stream_reader::StreamReaderWithPause; +use crate::executor::{ + expect_first_barrier, ActorContextRef, BoxedExecutor, BoxedMessageStream, Executor, Message, + Mutation, PkIndices, PkIndicesRef, SourceStateTableHandler, StreamExecutorError, + StreamExecutorResult, StreamSourceCore, +}; + +const SPLIT_BATCH_SIZE: usize = 1000; + +type SplitBatch = Option>; + +pub struct FsFetchExecutor { + actor_ctx: ActorContextRef, + + identity: String, + + schema: Schema, + + pk_indices: PkIndices, + + /// Streaming source for external + stream_source_core: Option>, + + /// Upstream list executor. 
+ upstream: Option, + + // control options for connector level + source_ctrl_opts: SourceCtrlOpts, + + // config for the connector node + connector_params: ConnectorParams, +} + +impl FsFetchExecutor { + #[allow(clippy::too_many_arguments)] + pub fn new( + actor_ctx: ActorContextRef, + schema: Schema, + pk_indices: PkIndices, + stream_source_core: StreamSourceCore, + executor_id: u64, + upstream: BoxedExecutor, + source_ctrl_opts: SourceCtrlOpts, + connector_params: ConnectorParams, + ) -> Self { + Self { + actor_ctx, + identity: format!("FsFetchExecutor {:X}", executor_id), + schema, + pk_indices, + stream_source_core: Some(stream_source_core), + upstream: Some(upstream), + source_ctrl_opts, + connector_params, + } + } + + async fn replace_with_new_batch_reader( + splits_on_fetch: &mut usize, + state_store_handler: &SourceStateTableHandler, + column_ids: Vec, + source_ctx: SourceContext, + source_desc: &SourceDesc, + stream: &mut StreamReaderWithPause, + ) -> StreamExecutorResult<()> { + let mut batch = Vec::with_capacity(SPLIT_BATCH_SIZE); + 'vnodes: for vnode in state_store_handler.state_store.vnodes().iter_vnodes() { + let table_iter = state_store_handler + .state_store + .iter_with_vnode( + vnode, + &(Bound::::Unbounded, Bound::::Unbounded), + PrefetchOptions::new_for_exhaust_iter(), + ) + .await?; + pin_mut!(table_iter); + + while let Some(item) = table_iter.next().await { + let row = item?; + let split = match row.datum_at(1) { + Some(ScalarRefImpl::Jsonb(jsonb_ref)) => { + SplitImpl::from(FsSplit::restore_from_json(jsonb_ref.to_owned_scalar())?) 
+ } + _ => unreachable!(), + }; + batch.push(split); + + if batch.len() >= SPLIT_BATCH_SIZE { + break 'vnodes; + } + } + } + + if batch.is_empty() { + stream.replace_data_stream(stream::pending().boxed()); + } else { + *splits_on_fetch += batch.len(); + let batch_reader = + Self::build_batched_stream_reader(column_ids, source_ctx, source_desc, Some(batch)) + .await?; + stream.replace_data_stream(batch_reader); + } + + Ok(()) + } + + async fn build_batched_stream_reader( + column_ids: Vec, + source_ctx: SourceContext, + source_desc: &SourceDesc, + batch: SplitBatch, + ) -> StreamExecutorResult { + source_desc + .source + .stream_reader(batch, column_ids, Arc::new(source_ctx)) + .await + .map_err(StreamExecutorError::connector_error) + } + + fn build_source_ctx(&self, source_desc: &SourceDesc, source_id: TableId) -> SourceContext { + SourceContext::new_with_suppressor( + self.actor_ctx.id, + source_id, + self.actor_ctx.fragment_id, + source_desc.metrics.clone(), + self.source_ctrl_opts.clone(), + self.connector_params.connector_client.clone(), + self.actor_ctx.error_suppressor.clone(), + ) + } + + #[try_stream(ok = Message, error = StreamExecutorError)] + async fn into_stream(mut self) { + let mut upstream = self.upstream.take().unwrap().execute(); + let barrier = expect_first_barrier(&mut upstream).await?; + + let mut core = self.stream_source_core.take().unwrap(); + let mut state_store_handler = core.split_state_store; + + // Build source description from the builder. + let source_desc_builder = core.source_desc_builder.take().unwrap(); + + let source_desc = source_desc_builder + .build() + .map_err(StreamExecutorError::connector_error)?; + + // Initialize state table. 
+ state_store_handler.init_epoch(barrier.epoch); + + let mut splits_on_fetch: usize = 0; + let mut stream = StreamReaderWithPause::::new( + upstream, + stream::pending().boxed(), + ); + + if barrier.is_pause_on_startup() { + stream.pause_stream(); + } + + // If it is a recovery startup, + // there can be file assignments in the state table. + // Hence we try building a reader first. + Self::replace_with_new_batch_reader( + &mut splits_on_fetch, + &state_store_handler, + core.column_ids.clone(), + self.build_source_ctx(&source_desc, core.source_id), + &source_desc, + &mut stream, + ) + .await?; + + yield Message::Barrier(barrier); + + while let Some(msg) = stream.next().await { + match msg { + Err(e) => { + tracing::error!("Fetch Error: {:?}", e); + splits_on_fetch = 0; + } + Ok(msg) => { + match msg { + // This branch will be preferred. + Either::Left(msg) => { + match &msg { + Message::Barrier(barrier) => { + if let Some(mutation) = barrier.mutation.as_deref() { + match mutation { + Mutation::Pause => stream.pause_stream(), + Mutation::Resume => stream.resume_stream(), + _ => (), + } + } + + state_store_handler + .state_store + .commit(barrier.epoch) + .await?; + + if let Some(vnode_bitmap) = + barrier.as_update_vnode_bitmap(self.actor_ctx.id) + { + // if _cache_may_stale, we must rebuild the stream to adjust vnode mappings + let (_prev_vnode_bitmap, cache_may_stale) = + state_store_handler + .state_store + .update_vnode_bitmap(vnode_bitmap); + + if cache_may_stale { + splits_on_fetch = 0; + } + } + + if splits_on_fetch == 0 { + Self::replace_with_new_batch_reader( + &mut splits_on_fetch, + &state_store_handler, + core.column_ids.clone(), + self.build_source_ctx(&source_desc, core.source_id), + &source_desc, + &mut stream, + ) + .await?; + } + + // Propagate the barrier. + yield msg; + } + // Receiving file assignments from upstream list executor, + // store into state table and try building a new reader. 
+ Message::Chunk(chunk) => { + let file_assignment = chunk + .data_chunk() + .rows() + .map(|row| { + let filename = row.datum_at(0).unwrap().into_utf8(); + let size = row.datum_at(2).unwrap().into_int64(); + FsSplit::new(filename.to_owned(), 0, size as usize) + }) + .collect(); + state_store_handler.take_snapshot(file_assignment).await?; + } + _ => unreachable!(), + } + } + // StreamChunk from FsSourceReader, and the reader reads only one file. + // If the file read out, replace with a new file reader. + Either::Right(StreamChunkWithState { + chunk, + split_offset_mapping, + }) => { + let mapping = split_offset_mapping.unwrap(); + for (split_id, offset) in mapping { + let row = state_store_handler + .get(split_id.clone()) + .await? + .expect("The fs_split should be in the state table."); + let fs_split = match row.datum_at(1) { + Some(ScalarRefImpl::Jsonb(jsonb_ref)) => { + FsSplit::restore_from_json(jsonb_ref.to_owned_scalar())? + } + _ => unreachable!(), + }; + + if offset.parse::().unwrap() >= fs_split.size { + splits_on_fetch -= 1; + state_store_handler.delete(split_id).await?; + } else { + state_store_handler + .set(split_id, fs_split.encode_to_json()) + .await?; + } + } + + yield Message::Chunk(chunk); + } + } + } + } + } + } +} + +impl Executor for FsFetchExecutor { + fn execute(self: Box) -> BoxedMessageStream { + self.into_stream().boxed() + } + + fn schema(&self) -> &Schema { + &self.schema + } + + fn pk_indices(&self) -> PkIndicesRef<'_> { + &self.pk_indices + } + + fn identity(&self) -> &str { + self.identity.as_str() + } +} + +impl Debug for FsFetchExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if let Some(core) = &self.stream_source_core { + f.debug_struct("FsFetchExecutor") + .field("source_id", &core.source_id) + .field("column_ids", &core.column_ids) + .field("pk_indices", &self.pk_indices) + .finish() + } else { + f.debug_struct("FsFetchExecutor").finish() + } + } +} diff --git 
a/src/stream/src/executor/source/fs_source_executor.rs b/src/stream/src/executor/source/fs_source_executor.rs index aba89f817b09a..bba0e30eb5712 100644 --- a/src/stream/src/executor/source/fs_source_executor.rs +++ b/src/stream/src/executor/source/fs_source_executor.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// *** NOTICE: TO BE DEPRECATED *** // + use std::fmt::{Debug, Formatter}; use std::sync::Arc; @@ -112,12 +114,11 @@ impl FsSourceExecutor { None, self.actor_ctx.error_suppressor.clone(), ); - let stream_reader = source_desc + source_desc .source .stream_reader(state, column_ids, Arc::new(source_ctx)) .await - .map_err(StreamExecutorError::connector_error)?; - Ok(stream_reader.into_stream()) + .map_err(StreamExecutorError::connector_error) } async fn apply_split_change( diff --git a/src/stream/src/executor/source/list_executor.rs b/src/stream/src/executor/source/list_executor.rs new file mode 100644 index 0000000000000..53e8854594ce4 --- /dev/null +++ b/src/stream/src/executor/source/list_executor.rs @@ -0,0 +1,231 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::fmt::Formatter; +use std::sync::Arc; + +use anyhow::anyhow; +use either::Either; +use futures::StreamExt; +use futures_async_stream::try_stream; +use risingwave_common::array::Op; +use risingwave_common::catalog::Schema; +use risingwave_common::system_param::local_manager::SystemParamsReaderRef; +use risingwave_connector::source::filesystem::FsPage; +use risingwave_connector::source::{BoxTryStream, SourceCtrlOpts}; +use risingwave_connector::ConnectorParams; +use risingwave_source::source_desc::{SourceDesc, SourceDescBuilder}; +use risingwave_storage::StateStore; +use tokio::sync::mpsc::UnboundedReceiver; + +use crate::executor::error::StreamExecutorError; +use crate::executor::monitor::StreamingMetrics; +use crate::executor::stream_reader::StreamReaderWithPause; +use crate::executor::*; + +#[allow(dead_code)] +pub struct FsListExecutor { + actor_ctx: ActorContextRef, + + identity: String, + + schema: Schema, + + pk_indices: PkIndices, + + /// Streaming source for external + stream_source_core: Option>, + + /// Metrics for monitor. + metrics: Arc, + + /// Receiver of barrier channel. 
+ barrier_receiver: Option>, + + /// System parameter reader to read barrier interval + system_params: SystemParamsReaderRef, + + // control options for connector level + source_ctrl_opts: SourceCtrlOpts, + + // config for the connector node + connector_params: ConnectorParams, +} + +impl FsListExecutor { + #[allow(clippy::too_many_arguments)] + pub fn new( + actor_ctx: ActorContextRef, + schema: Schema, + pk_indices: PkIndices, + stream_source_core: Option>, + metrics: Arc, + barrier_receiver: UnboundedReceiver, + system_params: SystemParamsReaderRef, + executor_id: u64, + source_ctrl_opts: SourceCtrlOpts, + connector_params: ConnectorParams, + ) -> Self { + Self { + actor_ctx, + identity: format!("FsListExecutor {:X}", executor_id), + schema, + pk_indices, + stream_source_core, + metrics, + barrier_receiver: Some(barrier_receiver), + system_params, + source_ctrl_opts, + connector_params, + } + } + + async fn build_chunked_paginate_stream( + &self, + source_desc: &SourceDesc, + ) -> StreamExecutorResult> { + let stream = source_desc + .source + .get_source_list() + .await + .map_err(StreamExecutorError::connector_error)?; + + Ok(stream + .map(|item| item.map(Self::map_fs_page_to_chunk)) + .boxed()) + } + + fn map_fs_page_to_chunk(page: FsPage) -> StreamChunk { + let rows = page + .into_iter() + .map(|split| { + ( + Op::Insert, + OwnedRow::new(vec![ + Some(ScalarImpl::Utf8(split.name.into_boxed_str())), + Some(ScalarImpl::Timestamp(split.timestamp)), + Some(ScalarImpl::Int64(split.size)), + ]), + ) + }) + .collect::>(); + StreamChunk::from_rows( + &rows, + &[DataType::Varchar, DataType::Timestamp, DataType::Int64], + ) + } + + #[try_stream(ok = Message, error = StreamExecutorError)] + async fn into_stream(mut self) { + let mut barrier_receiver = self.barrier_receiver.take().unwrap(); + let barrier = barrier_receiver + .recv() + .instrument_await("source_recv_first_barrier") + .await + .ok_or_else(|| { + anyhow!( + "failed to receive the first barrier, actor_id: 
{:?}, source_id: {:?}", + self.actor_ctx.id, + self.stream_source_core.as_ref().unwrap().source_id + ) + })?; + + let mut core = self.stream_source_core.unwrap(); + + // Build source description from the builder. + let source_desc_builder: SourceDescBuilder = core.source_desc_builder.take().unwrap(); + let source_desc = source_desc_builder + .build() + .map_err(StreamExecutorError::connector_error)?; + + // Return the ownership of `stream_source_core` to the source executor. + self.stream_source_core = Some(core); + + let chunked_paginate_stream = self.build_chunked_paginate_stream(&source_desc).await?; + + let barrier_stream = barrier_to_message_stream(barrier_receiver).boxed(); + let mut stream = + StreamReaderWithPause::::new(barrier_stream, chunked_paginate_stream); + + if barrier.is_pause_on_startup() { + stream.pause_stream(); + } + + yield Message::Barrier(barrier); + + while let Some(msg) = stream.next().await { + match msg { + Err(e) => { + tracing::warn!("encountered an error, recovering. {:?}", e); + // todo: rebuild stream here + } + Ok(msg) => match msg { + // Barrier arrives. + Either::Left(msg) => match &msg { + Message::Barrier(barrier) => { + if let Some(mutation) = barrier.mutation.as_deref() { + match mutation { + Mutation::Pause => stream.pause_stream(), + Mutation::Resume => stream.resume_stream(), + _ => (), + } + } + + // Propagate the barrier. + yield msg; + } + // Only barrier can be received. + _ => unreachable!(), + }, + // Chunked FsPage arrives. 
+ Either::Right(chunk) => { + yield Message::Chunk(chunk); + } + }, + } + } + } +} + +impl Executor for FsListExecutor { + fn execute(self: Box) -> BoxedMessageStream { + self.into_stream().boxed() + } + + fn schema(&self) -> &Schema { + &self.schema + } + + fn pk_indices(&self) -> PkIndicesRef<'_> { + &self.pk_indices + } + + fn identity(&self) -> &str { + self.identity.as_str() + } +} + +impl Debug for FsListExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if let Some(core) = &self.stream_source_core { + f.debug_struct("FsListExecutor") + .field("source_id", &core.source_id) + .field("column_ids", &core.column_ids) + .field("pk_indices", &self.pk_indices) + .finish() + } else { + f.debug_struct("FsListExecutor").finish() + } + } +} diff --git a/src/stream/src/executor/source/mod.rs b/src/stream/src/executor/source/mod.rs index 1b06120561f51..18f7346777d6b 100644 --- a/src/stream/src/executor/source/mod.rs +++ b/src/stream/src/executor/source/mod.rs @@ -19,12 +19,15 @@ mod fs_source_executor; pub use fs_source_executor::*; use risingwave_common::bail; pub use state_table_handler::*; +pub mod fetch_executor; +pub use fetch_executor::*; pub mod source_executor; +pub mod list_executor; pub mod state_table_handler; - use futures_async_stream::try_stream; +pub use list_executor::*; use tokio::sync::mpsc::UnboundedReceiver; use crate::executor::error::StreamExecutorError; diff --git a/src/stream/src/executor/source/state_table_handler.rs b/src/stream/src/executor/source/state_table_handler.rs index f1ee9f0c90d4b..d742e72a4c7a9 100644 --- a/src/stream/src/executor/source/state_table_handler.rs +++ b/src/stream/src/executor/source/state_table_handler.rs @@ -14,8 +14,10 @@ use std::collections::HashSet; use std::ops::{Bound, Deref}; +use std::sync::Arc; use futures::{pin_mut, StreamExt}; +use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::{DatabaseId, SchemaId}; use 
risingwave_common::constants::hummock::PROPERTIES_RETENTION_SECOND_KEY; use risingwave_common::hash::VirtualNode; @@ -36,7 +38,7 @@ use risingwave_storage::StateStore; use crate::common::table::state_table::StateTable; use crate::executor::error::StreamExecutorError; -use crate::executor::StreamExecutorResult; +use crate::executor::{StreamExecutorResult, BACKFILL_STATE_KEY_SUFFIX}; const COMPLETE_SPLIT_PREFIX: &str = "SsGLdzRDqBuKzMf9bDap"; @@ -56,6 +58,21 @@ impl SourceStateTableHandler { } } + pub async fn from_table_catalog_with_vnodes( + table_catalog: &PbTable, + store: S, + vnodes: Option>, + ) -> Self { + // The state of source should not be cleaned up by retention_seconds + assert!(!table_catalog + .properties + .contains_key(&String::from(PROPERTIES_RETENTION_SECOND_KEY))); + + Self { + state_store: StateTable::from_table_catalog(table_catalog, store, vnodes).await, + } + } + pub fn init_epoch(&mut self, epoch: EpochPair) { self.state_store.init_epoch(epoch); } @@ -84,9 +101,9 @@ impl SourceStateTableHandler { // all source executor has vnode id zero let iter = self .state_store - .iter_row_with_pk_range( - &(start, end), + .iter_with_vnode( VirtualNode::ZERO, + &(start, end), PrefetchOptions::new_for_exhaust_iter(), ) .await?; @@ -159,7 +176,7 @@ impl SourceStateTableHandler { Ok(()) } - async fn delete(&mut self, key: SplitId) -> StreamExecutorResult<()> { + pub async fn delete(&mut self, key: SplitId) -> StreamExecutorResult<()> { if let Some(prev_row) = self.get(key).await? { self.state_store.delete(prev_row); } @@ -203,16 +220,41 @@ impl SourceStateTableHandler { &mut self, stream_source_split: &SplitImpl, ) -> StreamExecutorResult> { - Ok(match self.get(stream_source_split.id()).await? { + let split_id = stream_source_split.id(); + Ok(match self.get(split_id.clone()).await? { None => None, Some(row) => match row.datum_at(1) { Some(ScalarRefImpl::Jsonb(jsonb_ref)) => { - Some(SplitImpl::restore_from_json(jsonb_ref.to_owned_scalar())?) 
+ let mut split_impl = SplitImpl::restore_from_json(jsonb_ref.to_owned_scalar())?; + if let SplitImpl::MysqlCdc(ref mut split) = split_impl && let Some(mysql_split) = split.mysql_split.as_mut() { + // if the snapshot_done is not set, we should check whether the backfill is finished + if !mysql_split.inner.snapshot_done { + mysql_split.inner.snapshot_done = self.recover_cdc_snapshot_state(split_id).await?; + } + } + Some(split_impl) } _ => unreachable!(), }, }) } + + async fn recover_cdc_snapshot_state( + &mut self, + split_id: SplitId, + ) -> StreamExecutorResult { + let mut key = split_id.to_string(); + key.push_str(BACKFILL_STATE_KEY_SUFFIX); + + let flag = match self.get(key.into()).await? { + Some(row) => match row.datum_at(1) { + Some(ScalarRefImpl::Jsonb(jsonb_ref)) => jsonb_ref.as_bool()?, + _ => unreachable!("invalid cdc backfill persistent state"), + }, + None => false, + }; + Ok(flag) + } } // align with schema defined in `LogicalSource::infer_internal_table_catalog`. The function is used diff --git a/src/stream/src/executor/stateless_simple_agg.rs b/src/stream/src/executor/stateless_simple_agg.rs index cf2abbebae8ee..cc7e876f1d2de 100644 --- a/src/stream/src/executor/stateless_simple_agg.rs +++ b/src/stream/src/executor/stateless_simple_agg.rs @@ -18,7 +18,9 @@ use itertools::Itertools; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::catalog::Schema; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::agg::{build, AggCall, AggregateState, BoxedAggregateFunction}; +use risingwave_expr::aggregate::{ + build_retractable, AggCall, AggregateState, BoxedAggregateFunction, +}; use super::aggregation::{agg_call_filter_res, generate_agg_schema}; use super::error::StreamExecutorError; @@ -26,7 +28,7 @@ use super::*; use crate::error::StreamResult; pub struct StatelessSimpleAggExecutor { - ctx: ActorContextRef, + _ctx: ActorContextRef, pub(super) input: Box, pub(super) info: ExecutorInfo, pub(super) aggs: Vec, @@ -53,15 
+55,13 @@ impl Executor for StatelessSimpleAggExecutor { impl StatelessSimpleAggExecutor { async fn apply_chunk( - ctx: &ActorContextRef, - identity: &str, agg_calls: &[AggCall], aggs: &[BoxedAggregateFunction], states: &mut [AggregateState], chunk: &StreamChunk, ) -> StreamExecutorResult<()> { for ((agg, call), state) in aggs.iter().zip_eq_fast(agg_calls).zip_eq_fast(states) { - let vis = agg_call_filter_res(ctx, identity, call, chunk).await?; + let vis = agg_call_filter_res(call, chunk).await?; let chunk = chunk.project_with_vis(call.args.val_indices(), vis); agg.update(state, &chunk).await?; } @@ -71,7 +71,7 @@ impl StatelessSimpleAggExecutor { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(self) { let StatelessSimpleAggExecutor { - ctx, + _ctx, input, info, aggs, @@ -87,8 +87,7 @@ impl StatelessSimpleAggExecutor { match msg { Message::Watermark(_) => {} Message::Chunk(chunk) => { - Self::apply_chunk(&ctx, &info.identity, &agg_calls, &aggs, &mut states, &chunk) - .await?; + Self::apply_chunk(&agg_calls, &aggs, &mut states, &chunk).await?; is_dirty = true; } m @ Message::Barrier(_) => { @@ -112,7 +111,7 @@ impl StatelessSimpleAggExecutor { .try_collect()?; let ops = vec![Op::Insert; 1]; - yield Message::Chunk(StreamChunk::new(ops, columns, None)); + yield Message::Chunk(StreamChunk::new(ops, columns)); } yield m; @@ -136,10 +135,10 @@ impl StatelessSimpleAggExecutor { pk_indices, identity: format!("StatelessSimpleAggExecutor-{}", executor_id), }; - let aggs = agg_calls.iter().map(build).try_collect()?; + let aggs = agg_calls.iter().map(build_retractable).try_collect()?; Ok(StatelessSimpleAggExecutor { - ctx, + _ctx: ctx, input, info, aggs, diff --git a/src/stream/src/executor/temporal_join.rs b/src/stream/src/executor/temporal_join.rs index 1372ad808eab0..82c1e56649672 100644 --- a/src/stream/src/executor/temporal_join.rs +++ b/src/stream/src/executor/temporal_join.rs @@ -32,7 +32,7 @@ use risingwave_common::hash::{HashKey, 
NullBitmap}; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::types::DataType; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use risingwave_hummock_sdk::{HummockEpoch, HummockReadEpoch}; use risingwave_storage::store::PrefetchOptions; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -44,7 +44,9 @@ use crate::cache::{cache_may_stale, new_with_hasher_in, ManagedLruCache}; use crate::common::metrics::MetricsInfo; use crate::common::JoinStreamChunkBuilder; use crate::executor::monitor::StreamingMetrics; -use crate::executor::{ActorContextRef, BoxedExecutor, JoinType, JoinTypePrimitive, PkIndices}; +use crate::executor::{ + ActorContextRef, BoxedExecutor, JoinType, JoinTypePrimitive, PkIndices, Watermark, +}; use crate::task::AtomicU64Ref; pub struct TemporalJoinExecutor { @@ -55,7 +57,7 @@ pub struct TemporalJoinExecutor, right_join_keys: Vec, null_safe: Vec, - condition: Option, + condition: Option, output_indices: Vec, pk_indices: PkIndices, schema: Schema, @@ -152,10 +154,11 @@ impl TemporalSide { async fn lookup(&mut self, key: &K, epoch: HummockEpoch) -> StreamExecutorResult { let table_id_str = self.source.table_id().to_string(); let actor_id_str = self.ctx.id.to_string(); + let fragment_id_str = self.ctx.id.to_string(); self.ctx .streaming_metrics .temporal_join_total_query_cache_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); let res = if self.cache.contains(key) { @@ -166,7 +169,7 @@ impl TemporalSide { self.ctx .streaming_metrics .temporal_join_cache_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); let pk_prefix = key.deserialize(&self.join_key_data_types)?; @@ -207,10 +210,11 @@ impl TemporalSide { 
right_stream_key_indices: &[usize], ) -> StreamExecutorResult<()> { for chunk in chunks { - // Compact chunk, otherwise the following keys and chunk rows might fail to zip. - let chunk = chunk.compact(); let keys = K::build(join_keys, chunk.data_chunk())?; - for ((op, row), key) in chunk.rows().zip_eq_debug(keys.into_iter()) { + for (r, key) in chunk.rows_with_holes().zip_eq_debug(keys.into_iter()) { + let Some((op, row)) = r else { + continue; + }; if self.cache.contains(&key) { // Update cache let mut entry = self.cache.get_mut(&key).unwrap(); @@ -235,15 +239,16 @@ impl TemporalSide { enum InternalMessage { Chunk(StreamChunk), Barrier(Vec, Barrier), + WaterMark(Watermark), } #[try_stream(ok = StreamChunk, error = StreamExecutorError)] -pub async fn chunks_until_barrier(stream: impl MessageStream, expected_barrier: Barrier) { +async fn chunks_until_barrier(stream: impl MessageStream, expected_barrier: Barrier) { #[for_await] for item in stream { match item? { Message::Watermark(_) => { - // TODO: https://github.com/risingwavelabs/risingwave/issues/6042 + // ignore } Message::Chunk(c) => yield c, Message::Barrier(b) if b.epoch != expected_barrier.epoch => { @@ -254,6 +259,23 @@ pub async fn chunks_until_barrier(stream: impl MessageStream, expected_barrier: } } +#[try_stream(ok = InternalMessage, error = StreamExecutorError)] +async fn internal_messages_until_barrier(stream: impl MessageStream, expected_barrier: Barrier) { + #[for_await] + for item in stream { + match item? 
{ + Message::Watermark(w) => { + yield InternalMessage::WaterMark(w); + } + Message::Chunk(c) => yield InternalMessage::Chunk(c), + Message::Barrier(b) if b.epoch != expected_barrier.epoch => { + return Err(StreamExecutorError::align_barrier(expected_barrier, b)); + } + Message::Barrier(_) => return Ok(()), + } + } +} + // Align the left and right inputs according to their barriers, // such that in the produced stream, an aligned interval starts with // any number of `InternalMessage::Chunk(left_chunk)` and followed by @@ -285,18 +307,20 @@ async fn align_input(left: Box, right: Box) { } Some(Either::Right(Ok(Message::Barrier(b)))) => { #[for_await] - for chunk in chunks_until_barrier(left.by_ref(), b.clone()) { - yield InternalMessage::Chunk(chunk?); + for internal_message in + internal_messages_until_barrier(left.by_ref(), b.clone()) + { + yield internal_message?; } yield InternalMessage::Barrier(right_chunks, b); break 'inner; } Some(Either::Left(Err(e)) | Either::Right(Err(e))) => return Err(e), - Some( - Either::Left(Ok(Message::Watermark(_))) - | Either::Right(Ok(Message::Watermark(_))), - ) => { - // TODO: https://github.com/risingwavelabs/risingwave/issues/6042 + Some(Either::Left(Ok(Message::Watermark(w)))) => { + yield InternalMessage::WaterMark(w); + } + Some(Either::Right(Ok(Message::Watermark(_)))) => { + // ignore right side watermark } None => return Ok(()), } @@ -314,7 +338,7 @@ impl TemporalJoinExecutor left_join_keys: Vec, right_join_keys: Vec, null_safe: Vec, - condition: Option, + condition: Option, pk_indices: PkIndices, output_indices: Vec, table_output_indices: Vec, @@ -381,6 +405,8 @@ impl TemporalJoinExecutor self.right.schema().len(), ); + let left_to_output: HashMap = HashMap::from_iter(left_map.iter().cloned()); + let right_stream_key_indices = self.right.pk_indices().to_vec(); let null_matched = K::Bitmap::from_bool_vec(self.null_safe); @@ -389,18 +415,21 @@ impl TemporalJoinExecutor let table_id_str = 
self.right_table.source.table_id().to_string(); let actor_id_str = self.ctx.id.to_string(); + let fragment_id_str = self.ctx.fragment_id.to_string(); #[for_await] for msg in align_input(self.left, self.right) { self.right_table.cache.evict(); self.ctx .streaming_metrics .temporal_join_cached_entry_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .set(self.right_table.cache.len() as i64); match msg? { + InternalMessage::WaterMark(watermark) => { + let output_watermark_col_idx = *left_to_output.get(&watermark.col_idx).unwrap(); + yield Message::Watermark(watermark.with_idx(output_watermark_col_idx)); + } InternalMessage::Chunk(chunk) => { - // Compact chunk, otherwise the following keys and chunk rows might fail to zip. - let chunk = chunk.compact(); let mut builder = JoinStreamChunkBuilder::new( self.chunk_size, self.schema.data_types(), @@ -409,7 +438,10 @@ impl TemporalJoinExecutor ); let epoch = prev_epoch.expect("Chunk data should come after some barrier."); let keys = K::build(&self.left_join_keys, chunk.data_chunk())?; - for ((op, left_row), key) in chunk.rows().zip_eq_debug(keys.into_iter()) { + for (r, key) in chunk.rows_with_holes().zip_eq_debug(keys.into_iter()) { + let Some((op, left_row)) = r else { + continue; + }; if key.null_bitmap().is_subset(&null_matched) && let join_entry = self.right_table.lookup(&key, epoch).await? 
&& !join_entry.is_empty() { @@ -417,9 +449,7 @@ impl TemporalJoinExecutor // check join condition let ok = if let Some(ref mut cond) = self.condition { let concat_row = left_row.chain(&right_row).into_owned_row(); - cond.eval_row_infallible(&concat_row, |err| { - self.ctx.on_compute_error(err, self.identity.as_str()) - }) + cond.eval_row_infallible(&concat_row) .await .map(|s| *s.as_bool()) .unwrap_or(false) diff --git a/src/stream/src/executor/test_utils.rs b/src/stream/src/executor/test_utils.rs index 95479f448b895..13a9237cf0159 100644 --- a/src/stream/src/executor/test_utils.rs +++ b/src/stream/src/executor/test_utils.rs @@ -34,11 +34,11 @@ pub mod prelude { pub use risingwave_common::test_prelude::StreamChunkTestExt; pub use risingwave_common::types::DataType; pub use risingwave_common::util::sort_util::OrderType; - pub use risingwave_expr::expr::build_from_pretty; pub use risingwave_storage::memory::MemoryStateStore; pub use risingwave_storage::StateStore; pub use crate::common::table::state_table::StateTable; + pub use crate::executor::test_utils::expr::build_from_pretty; pub use crate::executor::test_utils::{MessageSender, MockSource, StreamExecutorTestExt}; pub use crate::executor::{ActorContext, BoxedMessageStream, Executor, PkIndices}; } @@ -263,6 +263,14 @@ pub trait StreamExecutorTestExt: MessageStream + Unpin { // FIXME: implement on any `impl MessageStream` if the analyzer works well. 
impl StreamExecutorTestExt for BoxedMessageStream {} +pub mod expr { + use risingwave_expr::expr::NonStrictExpression; + + pub fn build_from_pretty(s: impl AsRef) -> NonStrictExpression { + NonStrictExpression::for_test(risingwave_expr::expr::build_from_pretty(s)) + } +} + pub mod agg_executor { use std::sync::atomic::AtomicU64; use std::sync::Arc; @@ -271,7 +279,7 @@ pub mod agg_executor { use risingwave_common::hash::SerializedKey; use risingwave_common::types::DataType; use risingwave_common::util::sort_util::OrderType; - use risingwave_expr::agg::{AggCall, AggKind}; + use risingwave_expr::aggregate::{AggCall, AggKind}; use risingwave_storage::StateStore; use crate::common::table::state_table::StateTable; @@ -355,8 +363,8 @@ pub mod agg_executor { } } - /// Create result state table for agg executor. - pub async fn create_result_table( + /// Create intermediate state table for agg executor. + pub async fn create_intermediate_state_table( store: S, table_id: TableId, agg_calls: &[AggCall], @@ -386,7 +394,7 @@ pub mod agg_executor { add_column_desc(agg_call.return_type.clone()); }); - StateTable::new_without_distribution( + StateTable::new_without_distribution_inconsistent_op( store, table_id, column_descs, @@ -426,7 +434,7 @@ pub mod agg_executor { ) } - let result_table = create_result_table( + let intermediate_state_table = create_intermediate_state_table( store, TableId::new(agg_calls.len() as u32), &agg_calls, @@ -446,7 +454,7 @@ pub mod agg_executor { agg_calls, row_count_index, storages, - result_table, + intermediate_state_table, distinct_dedup_tables: Default::default(), watermark_epoch: Arc::new(AtomicU64::new(0)), metrics: Arc::new(StreamingMetrics::unused()), @@ -454,6 +462,7 @@ pub mod agg_executor { extra: HashAggExecutorExtraArgs { group_key_indices, chunk_size: 1024, + max_dirty_groups_heap_size: 64 << 20, emit_on_window_close, }, }) @@ -488,7 +497,7 @@ pub mod agg_executor { ) } - let result_table = create_result_table( + let 
intermediate_state_table = create_intermediate_state_table( store, TableId::new(agg_calls.len() as u32), &agg_calls, @@ -508,7 +517,7 @@ pub mod agg_executor { agg_calls, row_count_index, storages, - result_table, + intermediate_state_table, distinct_dedup_tables: Default::default(), watermark_epoch: Arc::new(AtomicU64::new(0)), metrics: Arc::new(StreamingMetrics::unused()), diff --git a/src/stream/src/executor/top_n/group_top_n.rs b/src/stream/src/executor/top_n/group_top_n.rs index 421b1141843a0..92681e3c31426 100644 --- a/src/stream/src/executor/top_n/group_top_n.rs +++ b/src/stream/src/executor/top_n/group_top_n.rs @@ -44,15 +44,14 @@ impl GroupTopNExecutor, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, group_by: Vec, state_table: StateTable, watermark_epoch: AtomicU64Ref, ) -> StreamResult { - let info = input.info(); Ok(TopNExecutorWrapper { input, ctx: ctx.clone(), @@ -61,7 +60,6 @@ impl GroupTopNExecutor InnerGroupTopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new( - input_info: ExecutorInfo, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, group_by: Vec, state_table: StateTable, watermark_epoch: AtomicU64Ref, ctx: ActorContextRef, ) -> StreamResult { - let ExecutorInfo { - pk_indices, schema, .. 
- } = input_info; - let metrics_info = MetricsInfo::new( ctx.streaming_metrics.clone(), state_table.table_id(), @@ -121,15 +114,12 @@ impl InnerGroupTopNExecutor::new(state_table, cache_key_serde.clone()); Ok(Self { - info: ExecutorInfo { - schema, - pk_indices, - identity: format!("GroupTopNExecutor {:X}", executor_id), - }, + info, offset: offset_and_limit.0, limit: offset_and_limit.1, managed_state, @@ -176,11 +166,14 @@ where async fn apply_chunk(&mut self, chunk: StreamChunk) -> StreamExecutorResult { let mut res_ops = Vec::with_capacity(self.limit); let mut res_rows = Vec::with_capacity(self.limit); - let chunk = chunk.compact(); let keys = K::build(&self.group_by, chunk.data_chunk())?; let table_id_str = self.managed_state.state_table.table_id().to_string(); let actor_id_str = self.ctx.id.to_string(); - for ((op, row_ref), group_cache_key) in chunk.rows().zip_eq_debug(keys.iter()) { + let fragment_id_str = self.ctx.fragment_id.to_string(); + for (r, group_cache_key) in chunk.rows_with_holes().zip_eq_debug(keys.iter()) { + let Some((op, row_ref)) = r else { + continue; + }; // The pk without group by let pk_row = row_ref.project(&self.storage_key_indices[self.group_by.len()..]); let cache_key = serialize_pk_to_cache_key(pk_row, &self.cache_key_serde); @@ -189,7 +182,7 @@ where self.ctx .streaming_metrics .group_top_n_total_query_cache_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); // If 'self.caches' does not already have a cache for the current group, create a new // cache for it and insert it into `self.caches` @@ -197,7 +190,7 @@ where self.ctx .streaming_metrics .group_top_n_cache_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); let mut topn_cache = TopNCache::new(self.offset, self.limit, self.schema().data_types()); @@ -234,7 +227,7 @@ where self.ctx .streaming_metrics 
.group_top_n_cached_entry_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .set(self.caches.len() as i64); generate_output(res_rows, res_ops, self.schema()) } @@ -398,19 +391,25 @@ mod tests { &pk_indices(), ) .await; - let a = GroupTopNExecutor::::new( - source as Box, - ActorContext::create(0), - storage_key(), - (0, 2), - order_by_1(), - 1, - vec![1], - state_table, - Arc::new(AtomicU64::new(0)), - ) - .unwrap(); - let top_n_executor = Box::new(a); + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), // this includes group key as prefix + identity: "GroupTopNExecutor 1".to_string(), + }; + let top_n_executor = Box::new( + GroupTopNExecutor::::new( + source as Box, + ActorContext::create(0), + info, + storage_key(), + (0, 2), + order_by_1(), + vec![1], + state_table, + Arc::new(AtomicU64::new(0)), + ) + .unwrap(), + ); let mut top_n_executor = top_n_executor.execute(); // consume the init barrier @@ -494,14 +493,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), // this includes group key as prefix + identity: "GroupTopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( GroupTopNExecutor::::new( source as Box, ActorContext::create(0), + info, storage_key(), (1, 2), order_by_1(), - 1, vec![1], state_table, Arc::new(AtomicU64::new(0)), @@ -584,14 +588,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), // this includes group key as prefix + identity: "GroupTopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( GroupTopNExecutor::::new( source as Box, ActorContext::create(0), + info, storage_key(), (0, 2), order_by_2(), - 1, vec![1, 2], state_table, Arc::new(AtomicU64::new(0)), diff --git 
a/src/stream/src/executor/top_n/group_top_n_appendonly.rs b/src/stream/src/executor/top_n/group_top_n_appendonly.rs index ceb4a3bca4d40..f8f3d4887b970 100644 --- a/src/stream/src/executor/top_n/group_top_n_appendonly.rs +++ b/src/stream/src/executor/top_n/group_top_n_appendonly.rs @@ -62,15 +62,14 @@ impl pub fn new( input: Box, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, group_by: Vec, state_table: StateTable, watermark_epoch: AtomicU64Ref, ) -> StreamResult { - let info = input.info(); Ok(TopNExecutorWrapper { input, ctx: ctx.clone(), @@ -79,7 +78,6 @@ impl storage_key, offset_and_limit, order_by, - executor_id, group_by, state_table, watermark_epoch, @@ -120,20 +118,15 @@ impl { #[allow(clippy::too_many_arguments)] pub fn new( - input_info: ExecutorInfo, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, group_by: Vec, state_table: StateTable, watermark_epoch: AtomicU64Ref, ctx: ActorContextRef, ) -> StreamResult { - let ExecutorInfo { - pk_indices, schema, .. 
- } = input_info; - let metrics_info = MetricsInfo::new( ctx.streaming_metrics.clone(), state_table.table_id(), @@ -141,15 +134,12 @@ impl "GroupTopN", ); - let cache_key_serde = create_cache_key_serde(&storage_key, &schema, &order_by, &group_by); + let cache_key_serde = + create_cache_key_serde(&storage_key, &info.schema, &order_by, &group_by); let managed_state = ManagedTopNState::::new(state_table, cache_key_serde.clone()); Ok(Self { - info: ExecutorInfo { - schema, - pk_indices, - identity: format!("AppendOnlyGroupTopNExecutor {:X}", executor_id), - }, + info, offset: offset_and_limit.0, limit: offset_and_limit.1, managed_state, @@ -170,14 +160,17 @@ where async fn apply_chunk(&mut self, chunk: StreamChunk) -> StreamExecutorResult { let mut res_ops = Vec::with_capacity(self.limit); let mut res_rows = Vec::with_capacity(self.limit); - let chunk = chunk.compact(); let keys = K::build(&self.group_by, chunk.data_chunk())?; let data_types = self.schema().data_types(); let row_deserializer = RowDeserializer::new(data_types.clone()); let table_id_str = self.managed_state.state_table.table_id().to_string(); let actor_id_str = self.ctx.id.to_string(); - for ((op, row_ref), group_cache_key) in chunk.rows().zip_eq_debug(keys.iter()) { + let fragment_id_str = self.ctx.fragment_id.to_string(); + for (r, group_cache_key) in chunk.rows_with_holes().zip_eq_debug(keys.iter()) { + let Some((op, row_ref)) = r else { + continue; + }; // The pk without group by let pk_row = row_ref.project(&self.storage_key_indices[self.group_by.len()..]); let cache_key = serialize_pk_to_cache_key(pk_row, &self.cache_key_serde); @@ -186,7 +179,7 @@ where self.ctx .streaming_metrics .group_top_n_appendonly_total_query_cache_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); // If 'self.caches' does not already have a cache for the current group, create a new // cache for it and insert it into `self.caches` @@ 
-194,7 +187,7 @@ where self.ctx .streaming_metrics .group_top_n_appendonly_cache_miss_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .inc(); let mut topn_cache = TopNCache::new(self.offset, self.limit, data_types.clone()); self.managed_state @@ -217,7 +210,7 @@ where self.ctx .streaming_metrics .group_top_n_appendonly_cached_entry_count - .with_label_values(&[&table_id_str, &actor_id_str]) + .with_label_values(&[&table_id_str, &actor_id_str, &fragment_id_str]) .set(self.caches.len() as i64); generate_output(res_rows, res_ops, self.schema()) } diff --git a/src/stream/src/executor/top_n/top_n_appendonly.rs b/src/stream/src/executor/top_n/top_n_appendonly.rs index aa93e2f2f519e..a56b43e4c5903 100644 --- a/src/stream/src/executor/top_n/top_n_appendonly.rs +++ b/src/stream/src/executor/top_n/top_n_appendonly.rs @@ -41,14 +41,12 @@ impl AppendOnlyTopNExecutor pub fn new( input: Box, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, state_table: StateTable, ) -> StreamResult { - let info = input.info(); - Ok(TopNExecutorWrapper { input, ctx, @@ -57,7 +55,6 @@ impl AppendOnlyTopNExecutor storage_key, offset_and_limit, order_by, - executor_id, state_table, )?, }) @@ -84,30 +81,21 @@ pub struct InnerAppendOnlyTopNExecutor { impl InnerAppendOnlyTopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new( - input_info: ExecutorInfo, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, state_table: StateTable, ) -> StreamResult { - let ExecutorInfo { - pk_indices, schema, .. 
- } = input_info; - let num_offset = offset_and_limit.0; let num_limit = offset_and_limit.1; - let cache_key_serde = create_cache_key_serde(&storage_key, &schema, &order_by, &[]); + let cache_key_serde = create_cache_key_serde(&storage_key, &info.schema, &order_by, &[]); let managed_state = ManagedTopNState::::new(state_table, cache_key_serde.clone()); - let data_types = schema.data_types(); + let data_types = info.schema.data_types(); Ok(Self { - info: ExecutorInfo { - schema, - pk_indices, - identity: format!("AppendOnlyTopNExecutor {:X}", executor_id), - }, + info, managed_state, storage_key_indices: storage_key.into_iter().map(|op| op.column_index).collect(), cache: TopNCache::new(num_offset, num_limit, data_types), @@ -180,7 +168,7 @@ mod tests { use super::AppendOnlyTopNExecutor; use crate::executor::test_utils::top_n_executor::create_in_memory_state_table; use crate::executor::test_utils::MockSource; - use crate::executor::{ActorContext, Barrier, Executor, Message, PkIndices}; + use crate::executor::{ActorContext, Barrier, Executor, ExecutorInfo, Message, PkIndices}; fn create_stream_chunks() -> Vec { let chunk1 = StreamChunk::from_pretty( @@ -261,14 +249,19 @@ mod tests { ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "AppendOnlyTopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( AppendOnlyTopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key, (0, 5), order_by(), - 1, state_table, ) .unwrap(), @@ -343,14 +336,19 @@ mod tests { ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "AppendOnlyTopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( AppendOnlyTopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key(), (3, 4), order_by(), - 1, state_table, ) .unwrap(), diff --git 
a/src/stream/src/executor/top_n/top_n_plain.rs b/src/stream/src/executor/top_n/top_n_plain.rs index cfb71053b18e4..e3cc70b9fc0b7 100644 --- a/src/stream/src/executor/top_n/top_n_plain.rs +++ b/src/stream/src/executor/top_n/top_n_plain.rs @@ -36,14 +36,12 @@ impl TopNExecutor { pub fn new( input: Box, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, state_table: StateTable, ) -> StreamResult { - let info = input.info(); - Ok(TopNExecutorWrapper { input, ctx, @@ -52,7 +50,6 @@ impl TopNExecutor { storage_key, offset_and_limit, order_by, - executor_id, state_table, )?, }) @@ -67,22 +64,14 @@ impl TopNExecutor { pub fn new_with_ties_for_test( input: Box, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, state_table: StateTable, ) -> StreamResult { - let info = input.info(); - - let mut inner = InnerTopNExecutor::new( - info, - storage_key, - offset_and_limit, - order_by, - executor_id, - state_table, - )?; + let mut inner = + InnerTopNExecutor::new(info, storage_key, offset_and_limit, order_by, state_table)?; inner.cache.high_capacity = 2; @@ -115,29 +104,21 @@ impl InnerTopNExecutor { /// into `CacheKey`. #[allow(clippy::too_many_arguments)] pub fn new( - input_info: ExecutorInfo, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, state_table: StateTable, ) -> StreamResult { - let ExecutorInfo { - pk_indices, schema, .. 
- } = input_info; let num_offset = offset_and_limit.0; let num_limit = offset_and_limit.1; - let cache_key_serde = create_cache_key_serde(&storage_key, &schema, &order_by, &[]); + let cache_key_serde = create_cache_key_serde(&storage_key, &info.schema, &order_by, &[]); let managed_state = ManagedTopNState::::new(state_table, cache_key_serde.clone()); - let data_types = schema.data_types(); + let data_types = info.schema.data_types(); Ok(Self { - info: ExecutorInfo { - schema, - pk_indices, - identity: format!("TopNExecutor {:X}", executor_id), - }, + info, managed_state, storage_key_indices: storage_key.into_iter().map(|op| op.column_index).collect(), cache: TopNCache::new(num_offset, num_limit, data_types), @@ -311,14 +292,20 @@ mod tests { &pk_indices(), ) .await; + + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key(), (3, 1000), order_by(), - 1, state_table, ) .unwrap(), @@ -407,14 +394,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key(), (0, 4), order_by(), - 1, state_table, ) .unwrap(), @@ -515,14 +507,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, true>::new( source as Box, ActorContext::create(0), + info, storage_key(), (0, 4), order_by(), - 1, state_table, ) .unwrap(), @@ -622,14 +619,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: 
source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key(), (3, 4), order_by(), - 1, state_table, ) .unwrap(), @@ -849,14 +851,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, false>::new( source as Box, ActorContext::create(0), + info, storage_key(), (1, 3), order_by(), - 1, state_table, ) .unwrap(), @@ -927,14 +934,20 @@ mod tests { state_store.clone(), ) .await; + let source = create_source_new_before_recovery(); + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::<_, false>::new( - create_source_new_before_recovery() as Box, + source as Box, ActorContext::create(0), + info, storage_key(), (1, 3), order_by(), - 1, state_table, ) .unwrap(), @@ -981,14 +994,20 @@ mod tests { .await; // recovery + let source = create_source_new_after_recovery(); + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor_after_recovery = Box::new( TopNExecutor::<_, false>::new( - create_source_new_after_recovery() as Box, + source as Box, ActorContext::create(0), + info, storage_key(), (1, 3), order_by(), - 1, state_table, ) .unwrap(), @@ -1110,14 +1129,19 @@ mod tests { &pk_indices(), ) .await; + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::new_with_ties_for_test( source as Box, ActorContext::create(0), + 
info, storage_key(), (0, 3), order_by(), - 1, state_table, ) .unwrap(), @@ -1260,14 +1284,20 @@ mod tests { state_store.clone(), ) .await; + let source = create_source_before_recovery(); + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor = Box::new( TopNExecutor::new_with_ties_for_test( - create_source_before_recovery() as Box, + source as Box, ActorContext::create(0), + info, storage_key(), (0, 3), order_by(), - 1, state_table, ) .unwrap(), @@ -1317,14 +1347,20 @@ mod tests { .await; // recovery + let source = create_source_after_recovery(); + let info = ExecutorInfo { + schema: source.schema().clone(), + pk_indices: source.pk_indices().to_vec(), + identity: "TopNExecutor 1".to_string(), + }; let top_n_executor_after_recovery = Box::new( TopNExecutor::new_with_ties_for_test( - create_source_after_recovery() as Box, + source as Box, ActorContext::create(0), + info, storage_key(), (0, 3), order_by(), - 1, state_table, ) .unwrap(), diff --git a/src/stream/src/executor/top_n/top_n_state.rs b/src/stream/src/executor/top_n/top_n_state.rs index 87d19e8550861..841e7f5bb50d7 100644 --- a/src/stream/src/executor/top_n/top_n_state.rs +++ b/src/stream/src/executor/top_n/top_n_state.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::ops::Bound; + use futures::{pin_mut, StreamExt}; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::util::epoch::EpochPair; @@ -81,9 +83,10 @@ impl ManagedTopNState { offset: usize, limit: Option, ) -> StreamExecutorResult> { + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); let state_table_iter = self .state_table - .iter_row_with_pk_prefix(&group_key, Default::default()) + .iter_with_prefix(&group_key, sub_range, Default::default()) .await?; pin_mut!(state_table_iter); @@ -118,10 +121,12 @@ impl ManagedTopNState { cache_size_limit: usize, ) -> StreamExecutorResult<()> { let cache = &mut topn_cache.high; + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); let state_table_iter = self .state_table - .iter_row_with_pk_prefix( + .iter_with_prefix( &group_key, + sub_range, PrefetchOptions { exhaust_iter: cache_size_limit == usize::MAX, }, @@ -165,11 +170,12 @@ impl ManagedTopNState { assert!(topn_cache.low.is_empty()); assert!(topn_cache.middle.is_empty()); assert!(topn_cache.high.is_empty()); - + let sub_range: &(Bound, Bound) = &(Bound::Unbounded, Bound::Unbounded); let state_table_iter = self .state_table - .iter_row_with_pk_prefix( + .iter_with_prefix( &group_key, + sub_range, PrefetchOptions { exhaust_iter: topn_cache.limit == usize::MAX, }, diff --git a/src/stream/src/executor/top_n/utils.rs b/src/stream/src/executor/top_n/utils.rs index 198338d523097..cd235e9a26e00 100644 --- a/src/stream/src/executor/top_n/utils.rs +++ b/src/stream/src/executor/top_n/utils.rs @@ -164,7 +164,7 @@ pub fn generate_output( } // since `new_rows` is not empty, we unwrap directly let new_data_chunk = data_chunk_builder.consume_all().unwrap(); - let new_stream_chunk = StreamChunk::new(new_ops, new_data_chunk.columns().to_vec(), None); + let new_stream_chunk = StreamChunk::new(new_ops, new_data_chunk.columns().to_vec()); Ok(new_stream_chunk) } else { let columns = schema @@ -172,7 +172,7 @@ pub fn 
generate_output( .into_iter() .map(|x| x.finish().into()) .collect_vec(); - Ok(StreamChunk::new(vec![], columns, None)) + Ok(StreamChunk::new(vec![], columns)) } } diff --git a/src/stream/src/executor/values.rs b/src/stream/src/executor/values.rs index 512e9f6c28da3..8c09b56aa3551 100644 --- a/src/stream/src/executor/values.rs +++ b/src/stream/src/executor/values.rs @@ -21,7 +21,7 @@ use risingwave_common::array::{DataChunk, Op, StreamChunk}; use risingwave_common::catalog::Schema; use risingwave_common::ensure; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::expr::BoxedExpression; +use risingwave_expr::expr::NonStrictExpression; use tokio::sync::mpsc::UnboundedReceiver; use super::{ @@ -40,7 +40,7 @@ pub struct ValuesExecutor { barrier_receiver: UnboundedReceiver, progress: CreateMviewProgress, - rows: vec::IntoIter>, + rows: vec::IntoIter>, pk_indices: PkIndices, identity: String, schema: Schema, @@ -51,7 +51,7 @@ impl ValuesExecutor { pub fn new( ctx: ActorContextRef, progress: CreateMviewProgress, - rows: Vec>, + rows: Vec>, schema: Schema, barrier_receiver: UnboundedReceiver, executor_id: u64, @@ -83,10 +83,23 @@ impl ValuesExecutor { .unwrap(); let emit = barrier.is_newly_added(self.ctx.id); + let paused_on_startup = barrier.is_pause_on_startup(); yield Message::Barrier(barrier); + // If it's failover, do not evaluate rows (assume they have been yielded) if emit { + if paused_on_startup { + // Wait for the data stream to be resumed before yielding the chunks. 
+ while let Some(barrier) = barrier_receiver.recv().await { + let is_resume = barrier.is_resume(); + yield Message::Barrier(barrier); + if is_resume { + break; + } + } + } + let cardinality = schema.len(); ensure!(cardinality > 0); while !rows.is_empty() { @@ -99,11 +112,7 @@ impl ValuesExecutor { let mut array_builders = schema.create_array_builders(chunk_size); for row in rows.by_ref().take(chunk_size) { for (expr, builder) in row.into_iter().zip_eq_fast(&mut array_builders) { - let out = expr - .eval_infallible(&one_row_chunk, |err| { - self.ctx.on_compute_error(err, self.identity.as_str()) - }) - .await; + let out = expr.eval_infallible(&one_row_chunk).await; builder.append_array(&out); } } @@ -123,7 +132,7 @@ impl ValuesExecutor { while let Some(barrier) = barrier_receiver.recv().await { if emit { - progress.finish(barrier.epoch.curr); + progress.finish(barrier.epoch.curr, 0); } yield Message::Barrier(barrier); } @@ -158,7 +167,7 @@ mod tests { }; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::{DataType, ScalarImpl, StructType}; - use risingwave_expr::expr::{BoxedExpression, LiteralExpression}; + use risingwave_expr::expr::{BoxedExpression, LiteralExpression, NonStrictExpression}; use tokio::sync::mpsc::unbounded_channel; use super::ValuesExecutor; @@ -193,11 +202,11 @@ mod tests { vec![], ), Some(ScalarImpl::Struct(value)), - )) as BoxedExpression, + )), Box::new(LiteralExpression::new( DataType::Int64, Some(ScalarImpl::Int64(0)), - )) as BoxedExpression, + )), ]; let fields = exprs .iter() // for each column @@ -206,7 +215,10 @@ mod tests { let values_executor_struct = ValuesExecutor::new( ActorContext::create(actor_id), progress, - vec![exprs], + vec![exprs + .into_iter() + .map(NonStrictExpression::for_test) + .collect()], Schema { fields }, barrier_receiver, 10005, diff --git a/src/stream/src/executor/watermark_filter.rs b/src/stream/src/executor/watermark_filter.rs index 5f2a92e4e7f7b..5e5454cecff93 100644 --- 
a/src/stream/src/executor/watermark_filter.rs +++ b/src/stream/src/executor/watermark_filter.rs @@ -23,7 +23,8 @@ use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::{DataType, DefaultOrd, ScalarImpl}; use risingwave_common::{bail, row}; use risingwave_expr::expr::{ - build_func, BoxedExpression, Expression, InputRefExpression, LiteralExpression, + build_func_non_strict, ExpressionBoxExt, InputRefExpression, LiteralExpression, + NonStrictExpression, }; use risingwave_expr::Result as ExprResult; use risingwave_pb::expr::expr_node::Type; @@ -36,6 +37,7 @@ use super::{ }; use crate::common::table::state_table::StateTable; use crate::executor::{expect_first_barrier, Watermark}; +use crate::task::ActorEvalErrorReport; /// The executor will generate a `Watermark` after each chunk. /// This will also guarantee all later rows with event time **less than** the watermark will be @@ -43,7 +45,7 @@ use crate::executor::{expect_first_barrier, Watermark}; pub struct WatermarkFilterExecutor { input: BoxedExecutor, /// The expression used to calculate the watermark value. - watermark_expr: BoxedExpression, + watermark_expr: NonStrictExpression, /// The column we should generate watermark and filter on. 
event_time_col_idx: usize, ctx: ActorContextRef, @@ -54,12 +56,17 @@ pub struct WatermarkFilterExecutor { impl WatermarkFilterExecutor { pub fn new( input: BoxedExecutor, - watermark_expr: BoxedExpression, + watermark_expr: NonStrictExpression, event_time_col_idx: usize, ctx: ActorContextRef, table: StateTable, + executor_id: u64, ) -> Self { - let info = input.info(); + let info = ExecutorInfo { + schema: input.info().schema, + pk_indices: input.info().pk_indices, + identity: format!("WatermarkFilterExecutor {:X}", executor_id), + }; Self { input, @@ -106,6 +113,11 @@ impl WatermarkFilterExecutor { mut table, } = *self; + let eval_error_report = ActorEvalErrorReport { + actor_context: ctx.clone(), + identity: info.identity.into(), + }; + let watermark_type = watermark_expr.return_type(); assert_eq!( watermark_type, @@ -119,16 +131,17 @@ impl WatermarkFilterExecutor { yield Message::Barrier(first_barrier); // Initiate and yield the first watermark. - let mut current_watermark = - Self::get_global_max_watermark(&table, watermark_type.clone()).await?; + let mut current_watermark = Self::get_global_max_watermark(&table).await?; - let mut last_checkpoint_watermark = watermark_type.min_value(); + let mut last_checkpoint_watermark = None; - yield Message::Watermark(Watermark::new( - event_time_col_idx, - watermark_type.clone(), - current_watermark.clone(), - )); + if let Some(watermark) = current_watermark.clone() { + yield Message::Watermark(Watermark::new( + event_time_col_idx, + watermark_type.clone(), + watermark.clone(), + )); + } // If the input is idle let mut idle_input = true; @@ -145,18 +158,20 @@ impl WatermarkFilterExecutor { continue; } - let watermark_array = watermark_expr - .eval_infallible(chunk.data_chunk(), |err| { - ctx.on_compute_error(err, &info.identity) - }) - .await; + let watermark_array = watermark_expr.eval_infallible(chunk.data_chunk()).await; // Build the expression to calculate watermark filter. 
- let watermark_filter_expr = Self::build_watermark_filter_expr( - watermark_type.clone(), - event_time_col_idx, - current_watermark.clone(), - )?; + let watermark_filter_expr = current_watermark + .clone() + .map(|watermark| { + Self::build_watermark_filter_expr( + watermark_type.clone(), + event_time_col_idx, + watermark, + eval_error_report.clone(), + ) + }) + .transpose()?; // NULL watermark should not be considered. let max_watermark = watermark_array @@ -166,41 +181,49 @@ impl WatermarkFilterExecutor { if let Some(max_watermark) = max_watermark { // Assign a new watermark. - current_watermark = cmp::max_by( - current_watermark, + current_watermark = Some(current_watermark.map_or( max_watermark.into_scalar_impl(), - DefaultOrd::default_cmp, - ); + |watermark| { + cmp::max_by( + watermark, + max_watermark.into_scalar_impl(), + DefaultOrd::default_cmp, + ) + }, + )); } - let pred_output = watermark_filter_expr - .eval_infallible(chunk.data_chunk(), |err| { - ctx.on_compute_error(err, &info.identity) - }) - .await; - - if let Some(output_chunk) = FilterExecutor::filter(chunk, pred_output)? { - yield Message::Chunk(output_chunk); - }; - - idle_input = false; - yield Message::Watermark(Watermark::new( - event_time_col_idx, - watermark_type.clone(), - current_watermark.clone(), - )); + if let Some(expr) = watermark_filter_expr { + let pred_output = expr.eval_infallible(chunk.data_chunk()).await; + + if let Some(output_chunk) = FilterExecutor::filter(chunk, pred_output)? 
{ + yield Message::Chunk(output_chunk); + }; + } else { + // No watermark + yield Message::Chunk(chunk); + } + + if let Some(watermark) = current_watermark.clone() { + idle_input = false; + yield Message::Watermark(Watermark::new( + event_time_col_idx, + watermark_type.clone(), + watermark, + )); + } } Message::Watermark(watermark) => { if watermark.col_idx == event_time_col_idx { tracing::warn!("WatermarkFilterExecutor received a watermark on the event it is filtering."); let watermark = watermark.val; - if current_watermark.default_cmp(&watermark).is_lt() { - current_watermark = watermark; + if let Some(cur_watermark) = current_watermark.clone() && cur_watermark.default_cmp(&watermark).is_lt() { + current_watermark = Some(watermark.clone()); idle_input = false; yield Message::Watermark(Watermark::new( event_time_col_idx, watermark_type.clone(), - current_watermark.clone(), + watermark, )); } } else { @@ -215,9 +238,7 @@ impl WatermarkFilterExecutor { // Take the global max watermark when scaling happens. if previous_vnode_bitmap != vnode_bitmap { - current_watermark = - Self::get_global_max_watermark(&table, watermark_type.clone()) - .await?; + current_watermark = Self::get_global_max_watermark(&table).await?; } } @@ -226,12 +247,14 @@ impl WatermarkFilterExecutor { { last_checkpoint_watermark = current_watermark.clone(); // Persist the watermark when checkpoint arrives. - let vnodes = table.get_vnodes(); - for vnode in vnodes.iter_vnodes() { - let pk = Some(ScalarImpl::Int16(vnode.to_scalar())); - let row = [pk, Some(current_watermark.clone())]; - // FIXME(yuhao): use upsert. - table.insert(row); + if let Some(watermark) = current_watermark.clone() { + let vnodes = table.get_vnodes(); + for vnode in vnodes.iter_vnodes() { + let pk = Some(ScalarImpl::Int16(vnode.to_scalar())); + let row = [pk, Some(watermark.clone())]; + // This is an upsert. 
+ table.insert(row); + } } table.commit(barrier.epoch).await?; } else { @@ -242,18 +265,24 @@ impl WatermarkFilterExecutor { if idle_input { // Align watermark let global_max_watermark = - Self::get_global_max_watermark(&table, watermark_type.clone()) - .await?; - current_watermark = cmp::max_by( - current_watermark, - global_max_watermark, - DefaultOrd::default_cmp, - ); - yield Message::Watermark(Watermark::new( - event_time_col_idx, - watermark_type.clone(), - current_watermark.clone(), - )); + Self::get_global_max_watermark(&table).await?; + + current_watermark = if let Some(global_max_watermark) = global_max_watermark.clone() && let Some(watermark) = current_watermark.clone(){ + Some(cmp::max_by( + watermark, + global_max_watermark, + DefaultOrd::default_cmp, + )) + } else { + current_watermark.or(global_max_watermark) + }; + if let Some(watermark) = current_watermark.clone() { + yield Message::Watermark(Watermark::new( + event_time_col_idx, + watermark_type.clone(), + watermark, + )); + } } else { idle_input = true; } @@ -269,21 +298,23 @@ impl WatermarkFilterExecutor { watermark_type: DataType, event_time_col_idx: usize, watermark: ScalarImpl, - ) -> ExprResult { - build_func( + eval_error_report: ActorEvalErrorReport, + ) -> ExprResult { + build_func_non_strict( Type::GreaterThanOrEqual, DataType::Boolean, vec![ InputRefExpression::new(watermark_type.clone(), event_time_col_idx).boxed(), LiteralExpression::new(watermark_type, Some(watermark)).boxed(), ], + eval_error_report, ) } + /// If the returned if `Ok(None)`, it means there is no global max watermark. 
async fn get_global_max_watermark( table: &StateTable, - watermark_type: DataType, - ) -> StreamExecutorResult { + ) -> StreamExecutorResult> { let watermark_iter_futures = (0..VirtualNode::COUNT).map(|vnode| async move { let pk = row::once(Some(ScalarImpl::Int16(vnode as _))); let watermark_row: Option = table.get_row(pk).await?; @@ -307,8 +338,7 @@ impl WatermarkFilterExecutor { let watermark = watermarks .into_iter() .flatten() - .max_by(DefaultOrd::default_cmp) - .unwrap_or_else(|| watermark_type.min_value()); + .max_by(DefaultOrd::default_cmp); Ok(watermark) } @@ -321,11 +351,11 @@ mod tests { use risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::Date; use risingwave_common::util::sort_util::OrderType; - use risingwave_expr::expr::build_from_pretty; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::table::Distribution; use super::*; + use crate::executor::test_utils::expr::build_from_pretty; use crate::executor::test_utils::{MessageSender, MockSource}; use crate::executor::ActorContext; @@ -389,6 +419,7 @@ mod tests { 1, ActorContext::create(123), table, + 0, ) .boxed(), tx, @@ -431,13 +462,6 @@ mod tests { }; } - // Init watermark - let watermark = executor.next().await.unwrap().unwrap(); - assert_eq!( - watermark.into_watermark().unwrap(), - watermark!(WATERMARK_TYPE.min_value()), - ); - // push the 1st chunk tx.push_chunk(chunk1); let chunk = executor.next().await.unwrap().unwrap(); diff --git a/src/stream/src/executor/wrapper.rs b/src/stream/src/executor/wrapper.rs index 3109ba6ffe676..1b52911b2b509 100644 --- a/src/stream/src/executor/wrapper.rs +++ b/src/stream/src/executor/wrapper.rs @@ -17,11 +17,7 @@ use std::sync::Arc; use futures::StreamExt; use risingwave_common::catalog::Schema; -use super::monitor::StreamingMetrics; -use super::{ - BoxedExecutor, BoxedMessageStream, Executor, ExecutorInfo, MessageStream, PkIndicesRef, -}; -use crate::task::ActorId; +use super::*; mod epoch_check; mod 
epoch_provide; @@ -29,21 +25,11 @@ mod schema_check; mod trace; mod update_check; -struct ExtraInfo { - /// Index of input to this operator. - input_pos: usize, - - actor_id: ActorId, - executor_id: u64, - - metrics: Arc, -} - /// [`WrapperExecutor`] will do some sanity checks and logging for the wrapped executor. pub struct WrapperExecutor { input: BoxedExecutor, - extra: ExtraInfo, + actor_ctx: ActorContextRef, enable_executor_row_count: bool, } @@ -51,29 +37,19 @@ pub struct WrapperExecutor { impl WrapperExecutor { pub fn new( input: BoxedExecutor, - input_pos: usize, - actor_id: ActorId, - executor_id: u64, - metrics: Arc, + actor_ctx: ActorContextRef, enable_executor_row_count: bool, ) -> Self { Self { input, - extra: ExtraInfo { - input_pos, - actor_id, - executor_id, - metrics, - }, + actor_ctx, enable_executor_row_count, } } #[allow(clippy::let_and_return)] fn wrap_debug( - _enable_executor_row_count: bool, info: Arc, - _extra: ExtraInfo, stream: impl MessageStream + 'static, ) -> impl MessageStream + 'static { // Update check @@ -85,14 +61,13 @@ impl WrapperExecutor { fn wrap( enable_executor_row_count: bool, info: Arc, - extra: ExtraInfo, + actor_ctx: ActorContextRef, stream: impl MessageStream + 'static, ) -> BoxedMessageStream { // -- Shared wrappers -- // Await tree - let stream = - trace::instrument_await_tree(info.clone(), extra.actor_id, extra.executor_id, stream); + let stream = trace::instrument_await_tree(info.clone(), actor_ctx.id, stream); // Schema check let stream = schema_check::schema_check(info.clone(), stream); @@ -103,18 +78,10 @@ impl WrapperExecutor { let stream = epoch_provide::epoch_provide(stream); // Trace - let stream = trace::trace( - enable_executor_row_count, - info.clone(), - extra.input_pos, - extra.actor_id, - extra.executor_id, - extra.metrics.clone(), - stream, - ); + let stream = trace::trace(enable_executor_row_count, info.clone(), actor_ctx, stream); if cfg!(debug_assertions) { - 
Self::wrap_debug(enable_executor_row_count, info, extra, stream).boxed() + Self::wrap_debug(info, stream).boxed() } else { stream.boxed() } @@ -127,7 +94,7 @@ impl Executor for WrapperExecutor { Self::wrap( self.enable_executor_row_count, info, - self.extra, + self.actor_ctx, self.input.execute(), ) .boxed() @@ -138,7 +105,7 @@ impl Executor for WrapperExecutor { Self::wrap( self.enable_executor_row_count, info, - self.extra, + self.actor_ctx, self.input.execute_with_epoch(epoch), ) .boxed() diff --git a/src/stream/src/executor/wrapper/schema_check.rs b/src/stream/src/executor/wrapper/schema_check.rs index d23eca2b455c6..3e8738db8327a 100644 --- a/src/stream/src/executor/wrapper/schema_check.rs +++ b/src/stream/src/executor/wrapper/schema_check.rs @@ -45,7 +45,7 @@ pub async fn schema_check(info: Arc, input: impl MessageStream) { } Message::Barrier(_) => Ok(()), } - .unwrap_or_else(|e| panic!("schema check failed on {}: {}", info.identity, e)); + .unwrap_or_else(|e| panic!("schema check failed on {:?}: {}", info, e)); yield message; } diff --git a/src/stream/src/executor/wrapper/trace.rs b/src/stream/src/executor/wrapper/trace.rs index 0b18d54a0bb58..fbf22c5d6d34b 100644 --- a/src/stream/src/executor/wrapper/trace.rs +++ b/src/stream/src/executor/wrapper/trace.rs @@ -20,8 +20,7 @@ use futures_async_stream::try_stream; use tracing::{Instrument, Span}; use crate::executor::error::StreamExecutorError; -use crate::executor::monitor::StreamingMetrics; -use crate::executor::{ExecutorInfo, Message, MessageStream}; +use crate::executor::{ActorContextRef, ExecutorInfo, Message, MessageStream}; use crate::task::ActorId; /// Streams wrapped by `trace` will be traced with `tracing` spans and reported to `opentelemetry`. 
@@ -29,19 +28,21 @@ use crate::task::ActorId; pub async fn trace( enable_executor_row_count: bool, info: Arc, - _input_pos: usize, - actor_id: ActorId, - executor_id: u64, - metrics: Arc, + actor_ctx: ActorContextRef, input: impl MessageStream, ) { - let actor_id_string = actor_id.to_string(); - - let span_name = pretty_identity(&info.identity, actor_id, executor_id); - - let is_sink_or_mv = info.identity.contains("Materialize") || info.identity.contains("Sink"); - - let new_span = || tracing::info_span!("executor", "otel.name" = span_name, actor_id); + let actor_id_str = actor_ctx.id.to_string(); + let fragment_id_str = actor_ctx.fragment_id.to_string(); + + let span_name = pretty_identity(&info.identity, actor_ctx.id); + + let new_span = || { + tracing::info_span!( + "executor", + "otel.name" = span_name, + "actor_id" = actor_ctx.id + ) + }; let mut span = new_span(); pin_mut!(input); @@ -51,13 +52,14 @@ pub async fn trace( span.in_scope(|| match &message { Message::Chunk(chunk) => { if chunk.cardinality() > 0 { - if enable_executor_row_count || is_sink_or_mv { - metrics + if enable_executor_row_count { + actor_ctx + .streaming_metrics .executor_row_count - .with_label_values(&[&actor_id_string, &span_name]) + .with_label_values(&[&actor_id_str, &fragment_id_str, &info.identity]) .inc_by(chunk.cardinality() as u64); } - tracing::trace!( + tracing::debug!( target: "events::stream::message::chunk", cardinality = chunk.cardinality(), capacity = chunk.capacity(), @@ -66,14 +68,14 @@ pub async fn trace( } } Message::Watermark(watermark) => { - tracing::trace!( + tracing::debug!( target: "events::stream::message::watermark", value = ?watermark.val, col_idx = watermark.col_idx, ); } Message::Barrier(barrier) => { - tracing::trace!( + tracing::debug!( target: "events::stream::message::barrier", prev_epoch = barrier.epoch.prev, curr_epoch = barrier.epoch.curr, @@ -100,13 +102,8 @@ pub async fn trace( } } -fn pretty_identity(identity: &str, actor_id: ActorId, executor_id: 
u64) -> String { - format!( - "{} (actor {}, operator {})", - identity, - actor_id, - executor_id as u32 // The lower 32 bit is for the operator id. - ) +fn pretty_identity(identity: &str, actor_id: ActorId) -> String { + format!("{} (actor {})", identity, actor_id) } /// Streams wrapped by `instrument_await_tree` will be able to print the spans of the @@ -115,12 +112,11 @@ fn pretty_identity(identity: &str, actor_id: ActorId, executor_id: u64) -> Strin pub async fn instrument_await_tree( info: Arc, actor_id: ActorId, - executor_id: u64, input: impl MessageStream, ) { pin_mut!(input); - let span: await_tree::Span = pretty_identity(&info.identity, actor_id, executor_id).into(); + let span: await_tree::Span = pretty_identity(&info.identity, actor_id).into(); while let Some(message) = input .next() diff --git a/src/stream/src/from_proto/chain.rs b/src/stream/src/from_proto/chain.rs index d1a971a5cbb4a..81030526b82f3 100644 --- a/src/stream/src/from_proto/chain.rs +++ b/src/stream/src/from_proto/chain.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use risingwave_common::catalog::{ColumnDesc, ColumnId, TableId, TableOption}; +use risingwave_common::catalog::{ColumnDesc, ColumnId, Schema, TableId, TableOption}; use risingwave_common::util::sort_util::OrderType; use risingwave_pb::plan_common::StorageTableDesc; use risingwave_pb::stream_plan::{ChainNode, ChainType}; @@ -40,28 +40,31 @@ impl ExecutorBuilder for ChainExecutorBuilder { stream: &mut LocalStreamManagerCore, ) -> StreamResult { let [mview, snapshot]: [_; 2] = params.input.try_into().unwrap(); - // For reporting the progress. let progress = stream .context .register_create_mview_progress(params.actor_context.id); - // The batch query executor scans on a mapped adhoc mview table, thus we should directly use - // its schema. 
- let schema = snapshot.schema().clone(); - let output_indices = node .output_indices .iter() .map(|&i| i as usize) .collect_vec(); - // For `Chain`s other than `Backfill`, there should be no extra mapping required. We can - // directly output the columns received from the upstream or snapshot. - if !matches!(node.chain_type(), ChainType::Backfill) { - let all_indices = (0..schema.len()).collect_vec(); + let schema = if matches!(node.chain_type(), ChainType::Backfill) { + Schema::new( + output_indices + .iter() + .map(|i| snapshot.schema().fields()[*i].clone()) + .collect_vec(), + ) + } else { + // For `Chain`s other than `Backfill`, there should be no extra mapping required. We can + // directly output the columns received from the upstream or snapshot. + let all_indices = (0..snapshot.schema().len()).collect_vec(); assert_eq!(output_indices, all_indices); - } + snapshot.schema().clone() + }; let executor = match node.chain_type() { ChainType::Chain | ChainType::UpstreamOnly => { @@ -169,6 +172,7 @@ impl ExecutorBuilder for ChainExecutorBuilder { params.pk_indices, stream.streaming_metrics.clone(), params.env.config().developer.chunk_size, + params.executor_id, ) .boxed() } diff --git a/src/stream/src/from_proto/eowc_over_window.rs b/src/stream/src/from_proto/eowc_over_window.rs index 0cd0060a40b68..bcee0736ae30f 100644 --- a/src/stream/src/from_proto/eowc_over_window.rs +++ b/src/stream/src/from_proto/eowc_over_window.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use risingwave_expr::function::window::WindowFuncCall; +use risingwave_expr::window_function::WindowFuncCall; use risingwave_pb::stream_plan::PbEowcOverWindowNode; use risingwave_storage::StateStore; diff --git a/src/stream/src/from_proto/filter.rs b/src/stream/src/from_proto/filter.rs index 32341f1c5ebb1..47661e105c506 100644 --- a/src/stream/src/from_proto/filter.rs +++ b/src/stream/src/from_proto/filter.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // 
limitations under the License. -use risingwave_expr::expr::build_from_prost; +use risingwave_expr::expr::build_non_strict_from_prost; use risingwave_pb::stream_plan::FilterNode; use super::*; @@ -31,7 +31,8 @@ impl ExecutorBuilder for FilterExecutorBuilder { _stream: &mut LocalStreamManagerCore, ) -> StreamResult { let [input]: [_; 1] = params.input.try_into().unwrap(); - let search_condition = build_from_prost(node.get_search_condition()?)?; + let search_condition = + build_non_strict_from_prost(node.get_search_condition()?, params.eval_error_report)?; Ok(FilterExecutor::new( params.actor_context, diff --git a/src/stream/src/from_proto/group_top_n.rs b/src/stream/src/from_proto/group_top_n.rs index 9f74134308daa..a7fc0d741206e 100644 --- a/src/stream/src/from_proto/group_top_n.rs +++ b/src/stream/src/from_proto/group_top_n.rs @@ -60,22 +60,19 @@ impl ExecutorBuilder for GroupTopNExecutorBuilder ExecutorBuilder for GroupTopNExecutorBuilder { input: BoxedExecutor, ctx: ActorContextRef, + info: ExecutorInfo, storage_key: Vec, offset_and_limit: (usize, usize), order_by: Vec, - executor_id: u64, group_by: Vec, state_table: StateTable, watermark_epoch: AtomicU64Ref, @@ -113,10 +110,10 @@ impl HashKeyDispatcher for GroupTopNExecutorDispatcherArgs { Ok($excutor::::new( self.input, self.ctx, + self.info, self.storage_key, self.offset_and_limit, self.order_by, - self.executor_id, self.group_by, self.state_table, self.watermark_epoch, diff --git a/src/stream/src/from_proto/hash_agg.rs b/src/stream/src/from_proto/hash_agg.rs index c0f09d1c504f6..a369f8124ebfb 100644 --- a/src/stream/src/from_proto/hash_agg.rs +++ b/src/stream/src/from_proto/hash_agg.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use risingwave_common::hash::{HashKey, HashKeyDispatcher}; use risingwave_common::types::DataType; -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_pb::stream_plan::HashAggNode; use super::agg_common::{ @@ -84,8 +84,9 @@ impl ExecutorBuilder for 
HashAggExecutorBuilder { vnodes.clone(), ) .await; - let result_table = StateTable::from_table_catalog( - node.get_result_table().unwrap(), + // disable sanity check so that old value is not required when updating states + let intermediate_state_table = StateTable::from_table_catalog_inconsistent_op( + node.get_intermediate_state_table().unwrap(), store.clone(), vnodes.clone(), ) @@ -106,13 +107,18 @@ impl ExecutorBuilder for HashAggExecutorBuilder { agg_calls, row_count_index: node.get_row_count_index() as usize, storages, - result_table, + intermediate_state_table, distinct_dedup_tables, watermark_epoch: stream.get_watermark_epoch(), metrics: params.executor_stats, extra: HashAggExecutorExtraArgs { group_key_indices, chunk_size: params.env.config().developer.chunk_size, + max_dirty_groups_heap_size: params + .env + .config() + .developer + .hash_agg_max_dirty_groups_heap_size, emit_on_window_close: node.get_emit_on_window_close(), }, }, diff --git a/src/stream/src/from_proto/hash_join.rs b/src/stream/src/from_proto/hash_join.rs index 7f63fba21221f..87174282e517a 100644 --- a/src/stream/src/from_proto/hash_join.rs +++ b/src/stream/src/from_proto/hash_join.rs @@ -17,7 +17,9 @@ use std::sync::Arc; use risingwave_common::hash::{HashKey, HashKeyDispatcher}; use risingwave_common::types::DataType; -use risingwave_expr::expr::{build_from_prost, build_func, BoxedExpression, InputRefExpression}; +use risingwave_expr::expr::{ + build_func_non_strict, build_non_strict_from_prost, InputRefExpression, NonStrictExpression, +}; pub use risingwave_pb::expr::expr_node::Type as ExprType; use risingwave_pb::plan_common::JoinType as JoinTypeProto; use risingwave_pb::stream_plan::HashJoinNode; @@ -80,7 +82,10 @@ impl ExecutorBuilder for HashJoinExecutorBuilder { .collect_vec(); let condition = match node.get_condition() { - Ok(cond_prost) => Some(build_from_prost(cond_prost)?), + Ok(cond_prost) => Some(build_non_strict_from_prost( + cond_prost, + params.eval_error_report.clone(), + 
)?), Err(_) => None, }; trace!("Join non-equi condition: {:?}", condition); @@ -96,13 +101,18 @@ impl ExecutorBuilder for HashJoinExecutorBuilder { let data_type = source_l.schema().fields [min(key_required_larger, key_required_smaller)] .data_type(); - Some(build_func( + Some(build_func_non_strict( delta_expression.delta_type(), data_type.clone(), vec![ Box::new(InputRefExpression::new(data_type, 0)), - build_from_prost(delta_expression.delta.as_ref().unwrap())?, + build_non_strict_from_prost( + delta_expression.delta.as_ref().unwrap(), + params.eval_error_report.clone(), + )? + .into_inner(), ], + params.eval_error_report.clone(), )?) } else { None @@ -166,8 +176,8 @@ struct HashJoinExecutorDispatcherArgs { pk_indices: PkIndices, output_indices: Vec, executor_id: u64, - cond: Option, - inequality_pairs: Vec<(usize, usize, bool, Option)>, + cond: Option, + inequality_pairs: Vec<(usize, usize, bool, Option)>, op_info: String, state_table_l: StateTable, degree_state_table_l: StateTable, diff --git a/src/stream/src/from_proto/hop_window.rs b/src/stream/src/from_proto/hop_window.rs index 7a4ea721a58ab..5bf0240155fc2 100644 --- a/src/stream/src/from_proto/hop_window.rs +++ b/src/stream/src/from_proto/hop_window.rs @@ -14,7 +14,7 @@ use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; -use risingwave_expr::expr::build_from_prost; +use risingwave_expr::expr::build_non_strict_from_prost; use risingwave_pb::stream_plan::HopWindowNode; use super::*; @@ -52,12 +52,12 @@ impl ExecutorBuilder for HopWindowExecutorBuilder { let window_start_exprs: Vec<_> = node .get_window_start_exprs() .iter() - .map(build_from_prost) + .map(|e| build_non_strict_from_prost(e, params.eval_error_report.clone())) .try_collect()?; let window_end_exprs: Vec<_> = node .get_window_end_exprs() .iter() - .map(build_from_prost) + .map(|e| build_non_strict_from_prost(e, params.eval_error_report.clone())) .try_collect()?; let time_col = node.get_time_col() as usize; 
diff --git a/src/stream/src/from_proto/mod.rs b/src/stream/src/from_proto/mod.rs index bdb19f022ec37..2ec9476d0e904 100644 --- a/src/stream/src/from_proto/mod.rs +++ b/src/stream/src/from_proto/mod.rs @@ -166,5 +166,6 @@ pub async fn create_executor( NodeBody::NoOp => NoOpExecutorBuilder, NodeBody::EowcOverWindow => EowcOverWindowExecutorBuilder, NodeBody::OverWindow => OverWindowExecutorBuilder, + NodeBody::StreamFsFetch => FsFetchExecutorBuilder, } } diff --git a/src/stream/src/from_proto/mview.rs b/src/stream/src/from_proto/mview.rs index 9c4d084def8ba..d64490b29b84a 100644 --- a/src/stream/src/from_proto/mview.rs +++ b/src/stream/src/from_proto/mview.rs @@ -48,14 +48,19 @@ impl ExecutorBuilder for MaterializeExecutorBuilder { let conflict_behavior = ConflictBehavior::from_protobuf(&table.handle_pk_conflict_behavior()); + let info = ExecutorInfo { + schema: params.schema, + pk_indices: params.pk_indices, + identity: params.identity, + }; macro_rules! new_executor { ($SD:ident) => { MaterializeExecutor::<_, $SD>::new( input, + info, store, order_key, - params.executor_id, params.actor_context, params.vnode_bitmap.map(Arc::new), table, @@ -106,11 +111,16 @@ impl ExecutorBuilder for ArrangeExecutorBuilder { let vnodes = params.vnode_bitmap.map(Arc::new); let conflict_behavior = ConflictBehavior::from_protobuf(&table.handle_pk_conflict_behavior()); + let info = ExecutorInfo { + schema: params.schema, + pk_indices: params.pk_indices, + identity: params.identity, + }; let executor = MaterializeExecutor::<_, BasicSerde>::new( input, + info, store, keys, - params.executor_id, params.actor_context, vnodes, table, diff --git a/src/stream/src/from_proto/over_window.rs b/src/stream/src/from_proto/over_window.rs index 7d139ca3f74db..e18e753caf126 100644 --- a/src/stream/src/from_proto/over_window.rs +++ b/src/stream/src/from_proto/over_window.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use risingwave_common::session_config::OverWindowCachePolicy; use 
risingwave_common::util::sort_util::ColumnOrder; -use risingwave_expr::function::window::WindowFuncCall; +use risingwave_expr::window_function::WindowFuncCall; use risingwave_pb::stream_plan::PbOverWindowNode; use risingwave_storage::StateStore; diff --git a/src/stream/src/from_proto/project.rs b/src/stream/src/from_proto/project.rs index 111cc46ace641..ea01fd5c129c8 100644 --- a/src/stream/src/from_proto/project.rs +++ b/src/stream/src/from_proto/project.rs @@ -14,7 +14,7 @@ use multimap::MultiMap; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_expr::expr::build_from_prost; +use risingwave_expr::expr::build_non_strict_from_prost; use risingwave_pb::expr::expr_node::RexNode; use risingwave_pb::stream_plan::ProjectNode; @@ -37,7 +37,7 @@ impl ExecutorBuilder for ProjectExecutorBuilder { let project_exprs: Vec<_> = node .get_select_list() .iter() - .map(build_from_prost) + .map(|e| build_non_strict_from_prost(e, params.eval_error_report.clone())) .try_collect()?; let watermark_derivations = MultiMap::from_iter( diff --git a/src/stream/src/from_proto/simple_agg.rs b/src/stream/src/from_proto/simple_agg.rs index 78ab66df47ae0..5423e4fd2043f 100644 --- a/src/stream/src/from_proto/simple_agg.rs +++ b/src/stream/src/from_proto/simple_agg.rs @@ -14,7 +14,7 @@ //! 
Streaming Simple Aggregator -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_pb::stream_plan::SimpleAggNode; use super::agg_common::{ @@ -46,9 +46,13 @@ impl ExecutorBuilder for SimpleAggExecutorBuilder { let storages = build_agg_state_storages_from_proto(node.get_agg_call_states(), store.clone(), None) .await; - let result_table = - StateTable::from_table_catalog(node.get_result_table().unwrap(), store.clone(), None) - .await; + // disable sanity check so that old value is not required when updating states + let intermediate_state_table = StateTable::from_table_catalog_inconsistent_op( + node.get_intermediate_state_table().unwrap(), + store.clone(), + None, + ) + .await; let distinct_dedup_tables = build_distinct_dedup_table_from_proto(node.get_distinct_dedup_tables(), store, None) .await; @@ -64,7 +68,7 @@ impl ExecutorBuilder for SimpleAggExecutorBuilder { agg_calls, row_count_index: node.get_row_count_index() as usize, storages, - result_table, + intermediate_state_table, distinct_dedup_tables, watermark_epoch: stream.get_watermark_epoch(), metrics: params.executor_stats, diff --git a/src/stream/src/from_proto/sink.rs b/src/stream/src/from_proto/sink.rs index 0dd05d722b131..47f21c0a223cf 100644 --- a/src/stream/src/from_proto/sink.rs +++ b/src/stream/src/from_proto/sink.rs @@ -14,15 +14,19 @@ use std::sync::Arc; +use anyhow::anyhow; use risingwave_common::catalog::ColumnCatalog; -use risingwave_connector::sink::catalog::SinkType; -use risingwave_connector::sink::{SinkParam, SinkWriterParam}; +use risingwave_connector::match_sink_name_str; +use risingwave_connector::sink::catalog::{SinkFormatDesc, SinkType}; +use risingwave_connector::sink::{ + SinkError, SinkParam, SinkWriterParam, CONNECTOR_TYPE_KEY, SINK_TYPE_OPTION, +}; use risingwave_pb::stream_plan::{SinkLogStoreType, SinkNode}; use risingwave_storage::dispatch_state_store; use super::*; -use crate::common::log_store::in_mem::BoundedInMemLogStoreFactory; -use 
crate::common::log_store::kv_log_store::KvLogStoreFactory; +use crate::common::log_store_impl::in_mem::BoundedInMemLogStoreFactory; +use crate::common::log_store_impl::kv_log_store::{KvLogStoreFactory, KvLogStoreMetrics}; use crate::executor::SinkExecutor; pub struct SinkExecutorBuilder; @@ -45,7 +49,7 @@ impl ExecutorBuilder for SinkExecutorBuilder { let db_name = sink_desc.get_db_name().into(); let sink_from_name = sink_desc.get_sink_from_name().into(); let properties = sink_desc.get_properties().clone(); - let pk_indices = sink_desc + let downstream_pk = sink_desc .downstream_pk .iter() .map(|i| *i as usize) @@ -56,6 +60,35 @@ impl ExecutorBuilder for SinkExecutorBuilder { .into_iter() .map(ColumnCatalog::from) .collect_vec(); + + let connector = { + let sink_type = properties.get(CONNECTOR_TYPE_KEY).ok_or_else(|| { + SinkError::Config(anyhow!("missing config: {}", CONNECTOR_TYPE_KEY)) + })?; + + match_sink_name_str!( + sink_type.to_lowercase().as_str(), + SinkType, + Ok(SinkType::SINK_NAME), + |other| { + Err(SinkError::Config(anyhow!( + "unsupported sink connector {}", + other + ))) + } + ) + }?; + let format_desc = match &sink_desc.format_desc { + // Case A: new syntax `format ... 
encode ...` + Some(f) => Some(f.clone().try_into()?), + None => match sink_desc.properties.get(SINK_TYPE_OPTION) { + // Case B: old syntax `type = '...'` + Some(t) => SinkFormatDesc::from_legacy_type(connector, t)?, + // Case C: no format + encode required + None => None, + }, + }; + let sink_param = SinkParam { sink_id, properties, @@ -64,12 +97,30 @@ impl ExecutorBuilder for SinkExecutorBuilder { .filter(|col| !col.is_hidden) .map(|col| col.column_desc.clone()) .collect(), - pk_indices, + downstream_pk, sink_type, + format_desc, db_name, sink_from_name, }; + let identity = format!("SinkExecutor {:X?}", params.executor_id); + let sink_id_str = format!("{}", sink_id.sink_id); + + let sink_metrics = stream.streaming_metrics.new_sink_metrics( + identity.as_str(), + sink_id_str.as_str(), + connector, + ); + + let sink_write_param = SinkWriterParam { + connector_params: params.env.connector_params(), + executor_id: params.executor_id, + vnode_bitmap: params.vnode_bitmap.clone(), + meta_client: params.env.meta_client(), + sink_metrics, + }; + match node.log_store_type() { // Default value is the normal in memory log store to be backward compatible with the // previously unset value @@ -78,44 +129,42 @@ impl ExecutorBuilder for SinkExecutorBuilder { Ok(Box::new( SinkExecutor::new( input_executor, - stream.streaming_metrics.clone(), - SinkWriterParam { - connector_params: params.env.connector_params(), - executor_id: params.executor_id, - vnode_bitmap: params.vnode_bitmap, - meta_client: params.env.meta_client(), - }, + sink_write_param, sink_param, columns, params.actor_context, factory, + params.pk_indices, ) .await?, )) } SinkLogStoreType::KvLogStore => { + let metrics = KvLogStoreMetrics::new( + ¶ms.executor_stats, + &sink_write_param, + &sink_param, + connector, + ); + // TODO: support setting max row count in config dispatch_state_store!(params.env.state_store(), state_store, { let factory = KvLogStoreFactory::new( state_store, node.table.as_ref().unwrap().clone(), 
params.vnode_bitmap.clone().map(Arc::new), - 0, + 65536, + metrics, ); Ok(Box::new( SinkExecutor::new( input_executor, - stream.streaming_metrics.clone(), - SinkWriterParam { - connector_params: params.env.connector_params(), - executor_id: params.executor_id, - vnode_bitmap: params.vnode_bitmap, - meta_client: params.env.meta_client(), - }, + sink_write_param, sink_param, columns, params.actor_context, factory, + params.pk_indices, ) .await?, )) diff --git a/src/stream/src/from_proto/source.rs b/src/stream/src/from_proto/source.rs deleted file mode 100644 index f8487b98dc6a8..0000000000000 --- a/src/stream/src/from_proto/source.rs +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use risingwave_common::catalog::{ColumnId, Field, Schema, TableId}; -use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::OrderType; -use risingwave_connector::source::external::{ExternalTableType, SchemaTableName}; -use risingwave_connector::source::SourceCtrlOpts; -use risingwave_pb::stream_plan::SourceNode; -use risingwave_source::source_desc::SourceDescBuilder; -use risingwave_storage::panic_store::PanicStateStore; -use tokio::sync::mpsc::unbounded_channel; - -use super::*; -use crate::executor::external::ExternalStorageTable; -use crate::executor::source::StreamSourceCore; -use crate::executor::source_executor::SourceExecutor; -use crate::executor::state_table_handler::SourceStateTableHandler; -use crate::executor::{CdcBackfillExecutor, FsSourceExecutor}; - -const FS_CONNECTORS: &[&str] = &["s3"]; -pub struct SourceExecutorBuilder; - -#[async_trait::async_trait] -impl ExecutorBuilder for SourceExecutorBuilder { - type Node = SourceNode; - - async fn new_boxed_executor( - params: ExecutorParams, - node: &Self::Node, - store: impl StateStore, - stream: &mut LocalStreamManagerCore, - ) -> StreamResult { - let (sender, barrier_receiver) = unbounded_channel(); - stream - .context - .lock_barrier_manager() - .register_sender(params.actor_context.id, sender); - let system_params = params.env.system_params_manager_ref().get_params(); - - if let Some(source) = &node.source_inner { - let source_id = TableId::new(source.source_id); - let source_name = source.source_name.clone(); - let source_info = source.get_info()?; - - let source_desc_builder = SourceDescBuilder::new( - source.columns.clone(), - params.env.source_metrics(), - source.row_id_index.map(|x| x as _), - source.properties.clone(), - source_info.clone(), - params.env.connector_params(), - params.env.config().developer.connector_message_buffer_size, - // `pk_indices` is used to ensure that a message will be skipped instead of parsed - // with null pk when the pk column is 
missing. - // - // Currently pk_indices for source is always empty since pk information is not - // passed via `StreamSource` so null pk may be emitted to downstream. - // - // TODO: use the correct information to fill in pk_dicies. - // We should consdier add back the "pk_column_ids" field removed by #8841 in - // StreamSource - params.pk_indices.clone(), - ); - - let source_ctrl_opts = SourceCtrlOpts { - chunk_size: params.env.config().developer.chunk_size, - }; - - let column_ids: Vec<_> = source - .columns - .iter() - .map(|column| ColumnId::from(column.get_column_desc().unwrap().column_id)) - .collect(); - let fields = source - .columns - .iter() - .map(|prost| { - let column_desc = prost.column_desc.as_ref().unwrap(); - let data_type = DataType::from(column_desc.column_type.as_ref().unwrap()); - let name = column_desc.name.clone(); - Field::with_name(data_type, name) - }) - .collect(); - let schema = Schema::new(fields); - - let state_table_handler = SourceStateTableHandler::from_table_catalog( - source.state_table.as_ref().unwrap(), - store.clone(), - ) - .await; - let stream_source_core = StreamSourceCore::new( - source_id, - source_name, - column_ids, - source_desc_builder, - state_table_handler, - ); - - let connector = source - .properties - .get("connector") - .map(|c| c.to_ascii_lowercase()) - .unwrap_or_default(); - let is_fs_connector = FS_CONNECTORS.contains(&connector.as_str()); - - if is_fs_connector { - Ok(Box::new(FsSourceExecutor::new( - params.actor_context, - schema, - params.pk_indices, - stream_source_core, - params.executor_stats, - barrier_receiver, - system_params, - params.executor_id, - source_ctrl_opts, - )?)) - } else { - let source_exec = SourceExecutor::new( - params.actor_context.clone(), - schema.clone(), - params.pk_indices.clone(), - Some(stream_source_core), - params.executor_stats.clone(), - barrier_receiver, - system_params, - params.executor_id, - source_ctrl_opts.clone(), - params.env.connector_params(), - ); - - let 
table_type = ExternalTableType::from_properties(&source.properties); - if table_type.can_backfill() && let Some(table_desc) = source_info.upstream_table.clone() { - let upstream_table_name = SchemaTableName::from_properties(&source.properties); - let pk_indices = table_desc - .pk - .iter() - .map(|k| k.column_index as usize) - .collect_vec(); - - let order_types = table_desc - .pk - .iter() - .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) - .collect_vec(); - - let table_reader = table_type.create_table_reader(source.properties.clone(), schema.clone())?; - let external_table = ExternalStorageTable::new( - TableId::new(source.source_id), - upstream_table_name, - table_reader, - schema.clone(), - order_types, - pk_indices.clone(), - (0..table_desc.columns.len()).collect_vec(), - ); - - // use the state table from source to store the backfill state (may refactor in future) - let source_state_handler = SourceStateTableHandler::from_table_catalog( - source.state_table.as_ref().unwrap(), - store.clone(), - ).await; - let cdc_backfill = CdcBackfillExecutor::new( - params.actor_context.clone(), - external_table, - Box::new(source_exec), - (0..source.columns.len()).collect_vec(), // eliminate the last column (_rw_offset) - None, - schema.clone(), - pk_indices, - params.executor_stats, - source_state_handler, - source_ctrl_opts.chunk_size - ); - Ok(Box::new(cdc_backfill)) - - } else { - Ok(Box::new(source_exec)) - } - } - } else { - // If there is no external stream source, then no data should be persisted. We pass a - // `PanicStateStore` type here for indication. 
- Ok(Box::new(SourceExecutor::::new( - params.actor_context, - params.schema, - params.pk_indices, - None, - params.executor_stats, - barrier_receiver, - system_params, - params.executor_id, - // we don't expect any data in, so no need to set chunk_sizes - SourceCtrlOpts::default(), - params.env.connector_params(), - ))) - } - } -} diff --git a/src/stream/src/from_proto/source/fs_fetch.rs b/src/stream/src/from_proto/source/fs_fetch.rs new file mode 100644 index 0000000000000..b6df84c8560e4 --- /dev/null +++ b/src/stream/src/from_proto/source/fs_fetch.rs @@ -0,0 +1,120 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; + +use risingwave_common::catalog::{ColumnId, Field, Schema, TableId}; +use risingwave_common::types::DataType; +use risingwave_connector::source::SourceCtrlOpts; +use risingwave_pb::stream_plan::StreamFsFetchNode; +use risingwave_source::source_desc::SourceDescBuilder; +use risingwave_storage::StateStore; + +use crate::error::StreamResult; +use crate::executor::{ + BoxedExecutor, Executor, FlowControlExecutor, FsFetchExecutor, SourceStateTableHandler, + StreamSourceCore, +}; +use crate::from_proto::ExecutorBuilder; +use crate::task::{ExecutorParams, LocalStreamManagerCore}; + +pub struct FsFetchExecutorBuilder; + +#[async_trait::async_trait] +impl ExecutorBuilder for FsFetchExecutorBuilder { + type Node = StreamFsFetchNode; + + async fn new_boxed_executor( + params: ExecutorParams, + node: &Self::Node, + store: impl StateStore, + _stream: &mut LocalStreamManagerCore, + ) -> StreamResult { + let [upstream]: [_; 1] = params.input.try_into().unwrap(); + + let source = node.node_inner.as_ref().unwrap(); + + let source_id = TableId::new(source.source_id); + let source_name = source.source_name.clone(); + let source_info = source.get_info()?; + + let source_desc_builder = SourceDescBuilder::new( + source.columns.clone(), + params.env.source_metrics(), + source.row_id_index.map(|x| x as _), + source.properties.clone(), + source_info.clone(), + params.env.connector_params(), + params.env.config().developer.connector_message_buffer_size, + params.pk_indices.clone(), + ); + + let source_ctrl_opts = SourceCtrlOpts { + chunk_size: params.env.config().developer.chunk_size, + }; + + let column_ids: Vec<_> = source + .columns + .iter() + .map(|column| ColumnId::from(column.get_column_desc().unwrap().column_id)) + .collect(); + let fields = source + .columns + .iter() + .map(|prost| { + let column_desc = prost.column_desc.as_ref().unwrap(); + let data_type = DataType::from(column_desc.column_type.as_ref().unwrap()); + let name = column_desc.name.clone(); + 
Field::with_name(data_type, name) + }) + .collect(); + let schema = Schema::new(fields); + + let vnodes = Some(Arc::new( + params + .vnode_bitmap + .expect("vnodes not set for fetch executor"), + )); + let state_table_handler = SourceStateTableHandler::from_table_catalog_with_vnodes( + source.state_table.as_ref().unwrap(), + store.clone(), + vnodes, + ) + .await; + let stream_source_core = StreamSourceCore::new( + source_id, + source_name, + column_ids, + source_desc_builder, + state_table_handler, + ); + + let executor = FsFetchExecutor::new( + params.actor_context, + schema, + params.pk_indices, + stream_source_core, + params.executor_id, + upstream, + source_ctrl_opts, + params.env.connector_params(), + ) + .boxed(); + + if let Ok(rate_limit) = source.get_rate_limit() { + return Ok(FlowControlExecutor::new(executor, *rate_limit).boxed()); + } + Ok(executor) + } +} diff --git a/src/stream/src/from_proto/source/mod.rs b/src/stream/src/from_proto/source/mod.rs new file mode 100644 index 0000000000000..cb83889465a73 --- /dev/null +++ b/src/stream/src/from_proto/source/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod trad_source; +pub use trad_source::SourceExecutorBuilder; +mod fs_fetch; +pub use fs_fetch::FsFetchExecutorBuilder; + +use super::*; diff --git a/src/stream/src/from_proto/source/trad_source.rs b/src/stream/src/from_proto/source/trad_source.rs new file mode 100644 index 0000000000000..3f0793595c7c5 --- /dev/null +++ b/src/stream/src/from_proto/source/trad_source.rs @@ -0,0 +1,234 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use risingwave_common::catalog::{ColumnId, Field, Schema, TableId}; +use risingwave_common::types::DataType; +use risingwave_common::util::sort_util::OrderType; +use risingwave_connector::source::external::{ExternalTableType, SchemaTableName}; +use risingwave_connector::source::{ConnectorProperties, SourceCtrlOpts}; +use risingwave_pb::stream_plan::SourceNode; +use risingwave_source::source_desc::SourceDescBuilder; +use risingwave_storage::panic_store::PanicStateStore; +use tokio::sync::mpsc::unbounded_channel; + +use super::*; +use crate::executor::external::ExternalStorageTable; +use crate::executor::source::{FsListExecutor, StreamSourceCore}; +use crate::executor::source_executor::SourceExecutor; +use crate::executor::state_table_handler::SourceStateTableHandler; +use crate::executor::{CdcBackfillExecutor, FlowControlExecutor, FsSourceExecutor}; + +const FS_CONNECTORS: &[&str] = &["s3"]; +pub struct SourceExecutorBuilder; + +#[async_trait::async_trait] +impl ExecutorBuilder for 
SourceExecutorBuilder { + type Node = SourceNode; + + async fn new_boxed_executor( + params: ExecutorParams, + node: &Self::Node, + store: impl StateStore, + stream: &mut LocalStreamManagerCore, + ) -> StreamResult { + let (sender, barrier_receiver) = unbounded_channel(); + stream + .context + .lock_barrier_manager() + .register_sender(params.actor_context.id, sender); + let system_params = params.env.system_params_manager_ref().get_params(); + + if let Some(source) = &node.source_inner { + let executor = { + let source_id = TableId::new(source.source_id); + let source_name = source.source_name.clone(); + let source_info = source.get_info()?; + + let source_desc_builder = SourceDescBuilder::new( + source.columns.clone(), + params.env.source_metrics(), + source.row_id_index.map(|x| x as _), + source.properties.clone(), + source_info.clone(), + params.env.connector_params(), + params.env.config().developer.connector_message_buffer_size, + // `pk_indices` is used to ensure that a message will be skipped instead of parsed + // with null pk when the pk column is missing. + // + // Currently pk_indices for source is always empty since pk information is not + // passed via `StreamSource` so null pk may be emitted to downstream. + // + // TODO: use the correct information to fill in pk_indices.
+ // We should consider adding back the "pk_column_ids" field removed by #8841 in + // StreamSource + params.pk_indices.clone(), + ); + + let source_ctrl_opts = SourceCtrlOpts { + chunk_size: params.env.config().developer.chunk_size, + }; + + let column_ids: Vec<_> = source + .columns + .iter() + .map(|column| ColumnId::from(column.get_column_desc().unwrap().column_id)) + .collect(); + let fields = source + .columns + .iter() + .map(|prost| { + let column_desc = prost.column_desc.as_ref().unwrap(); + let data_type = DataType::from(column_desc.column_type.as_ref().unwrap()); + let name = column_desc.name.clone(); + Field::with_name(data_type, name) + }) + .collect(); + let schema = Schema::new(fields); + + let state_table_handler = SourceStateTableHandler::from_table_catalog( + source.state_table.as_ref().unwrap(), + store.clone(), + ) + .await; + let stream_source_core = StreamSourceCore::new( + source_id, + source_name, + column_ids, + source_desc_builder, + state_table_handler, + ); + + let connector = source + .properties + .get("connector") + .map(|c| c.to_ascii_lowercase()) + .unwrap_or_default(); + let is_fs_connector = FS_CONNECTORS.contains(&connector.as_str()); + let is_fs_v2_connector = + ConnectorProperties::is_new_fs_connector_hash_map(&source.properties); + + if is_fs_connector { + FsSourceExecutor::new( + params.actor_context, + schema, + params.pk_indices, + stream_source_core, + params.executor_stats, + barrier_receiver, + system_params, + params.executor_id, + source_ctrl_opts, + )?
+ .boxed() + } else if is_fs_v2_connector { + FsListExecutor::new( + params.actor_context.clone(), + schema.clone(), + params.pk_indices.clone(), + Some(stream_source_core), + params.executor_stats.clone(), + barrier_receiver, + system_params, + params.executor_id, + source_ctrl_opts.clone(), + params.env.connector_params(), + ) + .boxed() + } else { + let source_exec = SourceExecutor::new( + params.actor_context.clone(), + schema.clone(), + params.pk_indices.clone(), + Some(stream_source_core), + params.executor_stats.clone(), + barrier_receiver, + system_params, + params.executor_id, + source_ctrl_opts.clone(), + params.env.connector_params(), + ); + + let table_type = ExternalTableType::from_properties(&source.properties); + if table_type.can_backfill() && let Some(table_desc) = source_info.upstream_table.clone() { + let upstream_table_name = SchemaTableName::from_properties(&source.properties); + let table_pk_indices = table_desc + .pk + .iter() + .map(|k| k.column_index as usize) + .collect_vec(); + let table_pk_order_types = table_desc + .pk + .iter() + .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) + .collect_vec(); + + let table_reader = table_type.create_table_reader(source.properties.clone(), schema.clone())?; + let external_table = ExternalStorageTable::new( + TableId::new(source.source_id), + upstream_table_name, + table_reader, + schema.clone(), + table_pk_order_types, + table_pk_indices, + (0..table_desc.columns.len()).collect_vec(), + ); + + // use the state table from source to store the backfill state (may refactor in future) + let source_state_handler = SourceStateTableHandler::from_table_catalog( + source.state_table.as_ref().unwrap(), + store.clone(), + ).await; + let cdc_backfill = CdcBackfillExecutor::new( + params.actor_context.clone(), + external_table, + Box::new(source_exec), + (0..source.columns.len()).collect_vec(), // eliminate the last column (_rw_offset) + None, + schema.clone(), + params.pk_indices, + 
params.executor_stats, + source_state_handler, + source_ctrl_opts.chunk_size + ); + cdc_backfill.boxed() + } else { + source_exec.boxed() + } + } + }; + if let Ok(rate_limit) = source.get_rate_limit() { + Ok(FlowControlExecutor::new(executor, *rate_limit).boxed()) + } else { + Ok(executor) + } + } else { + // If there is no external stream source, then no data should be persisted. We pass a + // `PanicStateStore` type here for indication. + Ok(SourceExecutor::::new( + params.actor_context, + params.schema, + params.pk_indices, + None, + params.executor_stats, + barrier_receiver, + system_params, + params.executor_id, + // we don't expect any data in, so no need to set chunk_sizes + SourceCtrlOpts::default(), + params.env.connector_params(), + ) + .boxed()) + } + } +} diff --git a/src/stream/src/from_proto/stateless_simple_agg.rs b/src/stream/src/from_proto/stateless_simple_agg.rs index 37e4be0d7109e..f26316c86e6d0 100644 --- a/src/stream/src/from_proto/stateless_simple_agg.rs +++ b/src/stream/src/from_proto/stateless_simple_agg.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_pb::stream_plan::SimpleAggNode; use super::*; diff --git a/src/stream/src/from_proto/temporal_join.rs b/src/stream/src/from_proto/temporal_join.rs index 1883281f35e8f..58699089e8c27 100644 --- a/src/stream/src/from_proto/temporal_join.rs +++ b/src/stream/src/from_proto/temporal_join.rs @@ -18,7 +18,7 @@ use risingwave_common::catalog::{ColumnDesc, TableId, TableOption}; use risingwave_common::hash::{HashKey, HashKeyDispatcher}; use risingwave_common::types::DataType; use risingwave_common::util::sort_util::OrderType; -use risingwave_expr::expr::{build_from_prost, BoxedExpression}; +use risingwave_expr::expr::{build_non_strict_from_prost, NonStrictExpression}; use risingwave_pb::plan_common::{JoinType as JoinTypeProto, StorageTableDesc}; use risingwave_storage::table::batch_table::storage_table::StorageTable; use risingwave_storage::table::Distribution; @@ -133,7 +133,10 @@ impl ExecutorBuilder for TemporalJoinExecutorBuilder { let null_safe = node.get_null_safe().to_vec(); let condition = match node.get_condition() { - Ok(cond_prost) => Some(build_from_prost(cond_prost)?), + Ok(cond_prost) => Some(build_non_strict_from_prost( + cond_prost, + params.eval_error_report, + )?), Err(_) => None, }; @@ -187,7 +190,7 @@ struct TemporalJoinExecutorDispatcherArgs { left_join_keys: Vec, right_join_keys: Vec, null_safe: Vec, - condition: Option, + condition: Option, pk_indices: PkIndices, output_indices: Vec, table_output_indices: Vec, diff --git a/src/stream/src/from_proto/top_n.rs b/src/stream/src/from_proto/top_n.rs index cf8a0280522c2..f0aa967aae143 100644 --- a/src/stream/src/from_proto/top_n.rs +++ b/src/stream/src/from_proto/top_n.rs @@ -49,15 +49,21 @@ impl ExecutorBuilder for TopNExecutorBuilder { Ok($excutor::<_, $with_ties>::new( input, params.actor_context, + info, storage_key, (node.offset as usize, node.limit as usize), order_by, - params.executor_id, state_table, )? 
.boxed()) diff --git a/src/stream/src/from_proto/values.rs b/src/stream/src/from_proto/values.rs index 6857e1fc56ef9..077eea3511108 100644 --- a/src/stream/src/from_proto/values.rs +++ b/src/stream/src/from_proto/values.rs @@ -14,7 +14,7 @@ use itertools::Itertools; use risingwave_common::catalog::{Field, Schema}; -use risingwave_expr::expr::build_from_prost; +use risingwave_expr::expr::build_non_strict_from_prost; use risingwave_pb::stream_plan::ValuesNode; use risingwave_storage::StateStore; use tokio::sync::mpsc::unbounded_channel; @@ -53,7 +53,9 @@ impl ExecutorBuilder for ValuesExecutorBuilder { tuple .get_cells() .iter() - .map(|node| build_from_prost(node).unwrap()) + .map(|node| { + build_non_strict_from_prost(node, params.eval_error_report.clone()).unwrap() + }) .collect_vec() }) .collect_vec(); diff --git a/src/stream/src/from_proto/watermark_filter.rs b/src/stream/src/from_proto/watermark_filter.rs index 84b39288c7048..52c452115a4ce 100644 --- a/src/stream/src/from_proto/watermark_filter.rs +++ b/src/stream/src/from_proto/watermark_filter.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use risingwave_expr::expr::build_from_prost; +use risingwave_expr::expr::build_non_strict_from_prost; use risingwave_pb::stream_plan::WatermarkFilterNode; use super::*; @@ -36,7 +36,8 @@ impl ExecutorBuilder for WatermarkFilterBuilder { let [input]: [_; 1] = params.input.try_into().unwrap(); let watermark_descs = node.get_watermark_descs().clone(); let [watermark_desc]: [_; 1] = watermark_descs.try_into().unwrap(); - let watermark_expr = build_from_prost(&watermark_desc.expr.unwrap())?; + let watermark_expr = + build_non_strict_from_prost(&watermark_desc.expr.unwrap(), params.eval_error_report)?; let event_time_col_idx = watermark_desc.watermark_idx as usize; let vnodes = Arc::new( params @@ -55,6 +56,7 @@ impl ExecutorBuilder for WatermarkFilterBuilder { event_time_col_idx, params.actor_context, table, + params.executor_id, ) .boxed()) } diff --git a/src/stream/src/lib.rs 
b/src/stream/src/lib.rs index db1a3fe7819b6..389dfae7b8c0c 100644 --- a/src/stream/src/lib.rs +++ b/src/stream/src/lib.rs @@ -18,12 +18,11 @@ #![feature(type_alias_impl_trait)] #![feature(more_qualified_paths)] #![feature(lint_reasons)] -#![feature(binary_heap_drain_sorted)] #![feature(let_chains)] #![feature(hash_extract_if)] #![feature(extract_if)] -#![feature(generators)] -#![feature(iter_from_generator)] +#![feature(coroutines)] +#![feature(iter_from_coroutine)] #![feature(proc_macro_hygiene)] #![feature(stmt_expr_attributes)] #![feature(allocator_api)] @@ -37,13 +36,11 @@ #![feature(bound_map)] #![feature(iter_order_by)] #![feature(exact_size_is_empty)] -#![feature(return_position_impl_trait_in_trait)] #![feature(impl_trait_in_assoc_type)] #![feature(test)] #![feature(is_sorted)] #![feature(btree_cursors)] #![feature(assert_matches)] -#![feature(async_fn_in_trait)] #[macro_use] extern crate tracing; @@ -54,3 +51,6 @@ pub mod error; pub mod executor; mod from_proto; pub mod task; + +#[cfg(test)] +risingwave_expr_impl::enable!(); diff --git a/src/stream/src/task/barrier_manager.rs b/src/stream/src/task/barrier_manager.rs index 5581a8529c067..996881d3ff4b0 100644 --- a/src/stream/src/task/barrier_manager.rs +++ b/src/stream/src/task/barrier_manager.rs @@ -101,7 +101,7 @@ impl LocalBarrierManager { /// Register sender for source actors, used to send barriers. pub fn register_sender(&mut self, actor_id: ActorId, sender: UnboundedSender) { - tracing::trace!( + tracing::debug!( target: "events::stream::barrier::manager", actor_id = actor_id, "register sender" @@ -132,7 +132,7 @@ impl LocalBarrierManager { } }; let to_collect: HashSet = actor_ids_to_collect.into_iter().collect(); - trace!( + debug!( target: "events::stream::barrier::manager::send", "send barrier {:?}, senders = {:?}, actor_ids_to_collect = {:?}", barrier, @@ -172,7 +172,7 @@ impl LocalBarrierManager { // Actors to stop should still accept this barrier, but won't get sent to in next times. 
if let Some(actors) = barrier.all_stop_actors() { - trace!( + debug!( target: "events::stream::barrier::manager", "remove actors {:?} from senders", actors diff --git a/src/stream/src/task/barrier_manager/managed_state.rs b/src/stream/src/task/barrier_manager/managed_state.rs index c438272033831..43aeb4afba46b 100644 --- a/src/stream/src/task/barrier_manager/managed_state.rs +++ b/src/stream/src/task/barrier_manager/managed_state.rs @@ -112,14 +112,14 @@ impl ManagedBarrierState { .into_iter() .map(|(actor, state)| CreateMviewProgress { chain_actor_id: actor, - done: matches!(state, ChainState::Done), + done: matches!(state, ChainState::Done(_)), consumed_epoch: match state { ChainState::ConsumingUpstream(consumed_epoch, _) => consumed_epoch, - ChainState::Done => epoch, + ChainState::Done(_) => epoch, }, consumed_rows: match state { ChainState::ConsumingUpstream(_, consumed_rows) => consumed_rows, - ChainState::Done => 0, + ChainState::Done(consumed_rows) => consumed_rows, }, }) .collect(); @@ -193,12 +193,10 @@ impl ManagedBarrierState { /// Collect a `barrier` from the actor with `actor_id`. 
pub(super) fn collect(&mut self, actor_id: ActorId, barrier: &Barrier) { - tracing::trace!( + tracing::debug!( target: "events::stream::barrier::manager::collect", - "collect_barrier: epoch = {}, actor_id = {}, state = {:#?}", - barrier.epoch.curr, - actor_id, - self + epoch = barrier.epoch.curr, actor_id, state = ?self, + "collect_barrier", ); match self.epoch_barrier_state_map.get_mut(&barrier.epoch.curr) { diff --git a/src/stream/src/task/barrier_manager/progress.rs b/src/stream/src/task/barrier_manager/progress.rs index f5695013505bb..5abeab216cd00 100644 --- a/src/stream/src/task/barrier_manager/progress.rs +++ b/src/stream/src/task/barrier_manager/progress.rs @@ -23,7 +23,7 @@ type ConsumedRows = u64; #[derive(Debug, Clone, Copy)] pub(super) enum ChainState { ConsumingUpstream(ConsumedEpoch, ConsumedRows), - Done, + Done(ConsumedRows), } impl LocalBarrierManager { @@ -49,6 +49,35 @@ impl LocalBarrierManager { } /// The progress held by the chain executors to report to the local barrier manager. +/// +/// Progress can be computed by +/// `total_rows_consumed` / `total_rows_upstream`. +/// This yields the (approximate) percentage of rows we are done backfilling. +/// +/// For `total_rows_consumed`, the progress is tracked in the following way: +/// 1. Fetching the row count from our state table. +/// This number is the total number, NOT incremental. +/// This is done per actor. +/// 2. Refreshing this number on the meta side, on every barrier. +/// This is done by just summing up all the row counts from the actors. +/// +/// For `total_rows_upstream`, +/// this is fetched from `HummockVersion`'s statistics (`TableStats::total_key_count`). +/// +/// This is computed per `HummockVersion`, which is updated whenever a checkpoint is committed. +/// The `total_key_count` figure just counts the number of storage keys. +/// For example, if a key is inserted and then deleted, +/// it results in two storage entries in `LSMt`, so count=2.
+/// Only after compaction, the count will drop back to 0. +/// +/// So the total count could be more pessimistic, than actual progress. +/// +/// It is fine for this number not to be precise, +/// since we don't rely on it to update the status of a stream job internally. +/// +/// TODO(kwannoel): Perhaps it is possible to get total key count of the replicated state table +/// for arrangement backfill. We can use that to estimate the progress as well, and avoid recording +/// `row_count` state for it. pub struct CreateMviewProgress { barrier_manager: Arc>, @@ -100,10 +129,15 @@ impl CreateMviewProgress { ) { match self.state { Some(ChainState::ConsumingUpstream(last, last_consumed_rows)) => { - assert!(last < consumed_epoch); + assert!( + last < consumed_epoch, + "last_epoch: {:#?} must be greater than consumed epoch: {:#?}", + last, + consumed_epoch + ); assert!(last_consumed_rows <= current_consumed_rows); } - Some(ChainState::Done) => unreachable!(), + Some(ChainState::Done(_)) => unreachable!(), None => {} }; self.update_inner( @@ -114,11 +148,11 @@ impl CreateMviewProgress { /// Finish the progress. If the progress is already finished, then perform no-op. /// `current_epoch` should be provided to locate the barrier under concurrent checkpoint. 
- pub fn finish(&mut self, current_epoch: u64) { - if let Some(ChainState::Done) = self.state { + pub fn finish(&mut self, current_epoch: u64, current_consumed_rows: ConsumedRows) { + if let Some(ChainState::Done(_)) = self.state { return; } - self.update_inner(current_epoch, ChainState::Done); + self.update_inner(current_epoch, ChainState::Done(current_consumed_rows)); } } diff --git a/src/stream/src/task/stream_manager.rs b/src/stream/src/task/stream_manager.rs index 2abc8212e2984..f54eb9921f77c 100644 --- a/src/stream/src/task/stream_manager.rs +++ b/src/stream/src/task/stream_manager.rs @@ -33,6 +33,7 @@ use risingwave_common::util::runtime::BackgroundShutdownRuntime; use risingwave_hummock_sdk::LocalSstableInfo; use risingwave_pb::common::ActorInfo; use risingwave_pb::stream_plan; +use risingwave_pb::stream_plan::barrier::BarrierKind; use risingwave_pb::stream_plan::stream_node::NodeBody; use risingwave_pb::stream_plan::StreamNode; use risingwave_storage::monitor::HummockTraceFutureExt; @@ -104,10 +105,26 @@ pub struct LocalStreamManager { total_mem_val: Arc>, } +/// Report expression evaluation errors to the actor context. +/// +/// The struct can be cheaply cloned. +#[derive(Clone)] +pub struct ActorEvalErrorReport { + pub actor_context: ActorContextRef, + pub identity: Arc, +} + +impl risingwave_expr::expr::EvalErrorReport for ActorEvalErrorReport { + fn report(&self, err: risingwave_expr::ExprError) { + self.actor_context.on_compute_error(err, &self.identity); + } +} + pub struct ExecutorParams { pub env: StreamEnvironment, /// Indices of primary keys + // TODO: directly use it for `ExecutorInfo` pub pk_indices: PkIndices, /// Executor id, unique across all actors. @@ -116,12 +133,18 @@ pub struct ExecutorParams { /// Operator id, unique for each operator in fragment. pub operator_id: u64, - /// Information of the operator from plan node. + /// Information of the operator from plan node, like `StreamHashJoin { .. }`. 
+ // TODO: use it for `identity` pub op_info: String, /// The output schema of the executor. + // TODO: directly use it for `ExecutorInfo` pub schema: Schema, + /// The identity of the executor, like `HashJoin 1234ABCD`. + // TODO: directly use it for `ExecutorInfo` + pub identity: String, + /// The input executor. pub input: Vec, @@ -136,6 +159,9 @@ pub struct ExecutorParams { /// Vnodes owned by this executor. Represented in bitmap. pub vnode_bitmap: Option, + + /// Used for reporting expression evaluation errors. + pub eval_error_report: ActorEvalErrorReport, } impl Debug for ExecutorParams { @@ -219,7 +245,7 @@ impl LocalStreamManager { } /// Broadcast a barrier to all senders. Save a receiver in barrier manager - pub fn send_barrier( + pub async fn send_barrier( &self, barrier: &Barrier, actor_ids_to_send: impl IntoIterator, @@ -229,6 +255,11 @@ impl LocalStreamManager { .streaming_metrics .barrier_inflight_latency .start_timer(); + if barrier.kind == BarrierKind::Initial { + let core = self.core.lock().await; + core.get_watermark_epoch() + .store(barrier.epoch.curr, std::sync::atomic::Ordering::SeqCst); + } let mut barrier_manager = self.context.lock_barrier_manager(); barrier_manager.send_barrier( barrier, @@ -446,6 +477,7 @@ impl LocalStreamManagerCore { input: BoxedExecutor, dispatchers: &[stream_plan::Dispatcher], actor_id: ActorId, + fragment_id: FragmentId, ) -> StreamResult { let dispatcher_impls = dispatchers .iter() @@ -456,6 +488,7 @@ impl LocalStreamManagerCore { input, dispatcher_impls, actor_id, + fragment_id, self.context.clone(), self.streaming_metrics.clone(), )) @@ -471,7 +504,6 @@ impl LocalStreamManagerCore { &mut self, fragment_id: FragmentId, node: &stream_plan::StreamNode, - input_pos: usize, env: StreamEnvironment, store: impl StateStore, actor_context: &ActorContextRef, @@ -498,12 +530,11 @@ impl LocalStreamManagerCore { // Create the input executor before creating itself let mut input = Vec::with_capacity(node.input.iter().len()); - 
for (input_pos, input_stream_node) in node.input.iter().enumerate() { + for input_stream_node in &node.input { input.push( self.create_nodes_inner( fragment_id, input_stream_node, - input_pos, env.clone(), store.clone(), actor_context, @@ -528,12 +559,19 @@ impl LocalStreamManagerCore { let operator_id = unique_operator_id(fragment_id, node.operator_id); let schema = node.fields.iter().map(Field::from).collect(); + let identity = format!("{} {:X}", node.get_node_body().unwrap(), executor_id); + let eval_error_report = ActorEvalErrorReport { + actor_context: actor_context.clone(), + identity: identity.clone().into(), + }; + // Build the executor with params. let executor_params = ExecutorParams { env: env.clone(), - pk_indices, + pk_indices: pk_indices.clone(), executor_id, operator_id, + identity: identity.clone(), op_info, schema, input, @@ -541,17 +579,21 @@ impl LocalStreamManagerCore { executor_stats: self.streaming_metrics.clone(), actor_context: actor_context.clone(), vnode_bitmap, + eval_error_report, }; let executor = create_executor(executor_params, self, node, store).await?; + assert_eq!( + executor.pk_indices(), + &pk_indices, + "`pk_indices` of {} not consistent with what derived by optimizer", + executor.identity() + ); // Wrap the executor for debug purpose. 
let executor = WrapperExecutor::new( executor, - input_pos, - actor_context.id, - executor_id, - self.streaming_metrics.clone(), + actor_context.clone(), self.config.developer.enable_executor_row_count, ) .boxed(); @@ -587,7 +629,6 @@ impl LocalStreamManagerCore { self.create_nodes_inner( fragment_id, node, - 0, env, store, actor_context, @@ -638,7 +679,8 @@ impl LocalStreamManagerCore { .may_trace_hummock() .await?; - let dispatcher = self.create_dispatcher(executor, &actor.dispatcher, actor_id)?; + let dispatcher = + self.create_dispatcher(executor, &actor.dispatcher, actor_id, actor.fragment_id)?; let actor = Actor::new( dispatcher, subtasks, @@ -673,13 +715,14 @@ impl LocalStreamManagerCore { { let metrics = self.streaming_metrics.clone(); let actor_id_str = actor_id.to_string(); + let fragment_id_str = actor_context.fragment_id.to_string(); let allocation_stated = task_stats_alloc::allocation_stat( instrumented, Duration::from_millis(1000), move |bytes| { metrics .actor_memory_usage - .with_label_values(&[&actor_id_str]) + .with_label_values(&[&actor_id_str, &fragment_id_str]) .set(bytes as i64); actor_context.store_mem_usage(bytes); diff --git a/src/stream/tests/integration_tests/eowc_over_window.rs b/src/stream/tests/integration_tests/eowc_over_window.rs index d7d788680af55..9407b6013dc03 100644 --- a/src/stream/tests/integration_tests/eowc_over_window.rs +++ b/src/stream/tests/integration_tests/eowc_over_window.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use risingwave_expr::agg::{AggArgs, AggKind}; -use risingwave_expr::function::window::{Frame, FrameBound, WindowFuncCall, WindowFuncKind}; +use risingwave_expr::aggregate::{AggArgs, AggKind}; +use risingwave_expr::window_function::{Frame, FrameBound, WindowFuncCall, WindowFuncKind}; use risingwave_stream::executor::{EowcOverWindowExecutor, EowcOverWindowExecutorArgs}; use crate::prelude::*; diff --git a/src/stream/tests/integration_tests/hash_agg.rs b/src/stream/tests/integration_tests/hash_agg.rs index 07ab974e24b46..9f4908f252532 100644 --- a/src/stream/tests/integration_tests/hash_agg.rs +++ b/src/stream/tests/integration_tests/hash_agg.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_expr::agg::AggCall; +use risingwave_expr::aggregate::AggCall; use risingwave_stream::executor::test_utils::agg_executor::new_boxed_hash_agg_executor; use crate::prelude::*; @@ -284,7 +284,7 @@ async fn test_hash_agg_emit_on_window_close() { }; check_with_script( - || create_executor(), + create_executor, &format!( r###" - !barrier 1 diff --git a/src/stream/tests/integration_tests/hop_window.rs b/src/stream/tests/integration_tests/hop_window.rs index fff1908fbcf69..9d6d879240fc0 100644 --- a/src/stream/tests/integration_tests/hop_window.rs +++ b/src/stream/tests/integration_tests/hop_window.rs @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use risingwave_common::cast::str_to_timestamp; use risingwave_common::types::test_utils::IntervalTestExt; -use risingwave_common::types::Interval; +use risingwave_common::types::{Interval, Timestamp}; use risingwave_expr::expr::test_utils::make_hop_window_expression; +use risingwave_expr::expr::NonStrictExpression; use risingwave_stream::executor::{ExecutorInfo, HopWindowExecutor}; use crate::prelude::*; @@ -56,8 +56,14 @@ fn create_executor(output_indices: Vec) -> (MessageSender, BoxedMessageSt TIME_COL_IDX, window_slide, window_size, - window_start_exprs, - window_end_exprs, + window_start_exprs + .into_iter() + .map(NonStrictExpression::for_test) + .collect(), + window_end_exprs + .into_iter() + .map(NonStrictExpression::for_test) + .collect(), output_indices, CHUNK_SIZE, ) @@ -70,28 +76,28 @@ fn push_watermarks(tx: &mut MessageSender) { tx.push_watermark( TIME_COL_IDX, DataType::Timestamp, - str_to_timestamp("2023-07-06 18:27:03").unwrap().into(), + "2023-07-06 18:27:03".parse::().unwrap().into(), ); tx.push_watermark( TIME_COL_IDX, DataType::Timestamp, - str_to_timestamp("2023-07-06 18:29:59").unwrap().into(), + "2023-07-06 18:29:59".parse::().unwrap().into(), ); tx.push_watermark( TIME_COL_IDX, DataType::Timestamp, - str_to_timestamp("2023-07-06 18:30:00").unwrap().into(), + "2023-07-06 18:30:00".parse::().unwrap().into(), ); tx.push_watermark(0, DataType::Int64, 100.into()); tx.push_watermark( TIME_COL_IDX, DataType::Timestamp, - str_to_timestamp("2023-07-06 18:43:40").unwrap().into(), + "2023-07-06 18:43:40".parse::().unwrap().into(), ); tx.push_watermark( TIME_COL_IDX, DataType::Timestamp, - str_to_timestamp("2023-07-06 18:50:00").unwrap().into(), + "2023-07-06 18:50:00".parse::().unwrap().into(), ); } diff --git a/src/stream/tests/integration_tests/main.rs b/src/stream/tests/integration_tests/main.rs index 01b4f4c4d899c..f57aea9bcec14 100644 --- a/src/stream/tests/integration_tests/main.rs +++ b/src/stream/tests/integration_tests/main.rs @@ -12,6 +12,8 
@@ // See the License for the specific language governing permissions and // limitations under the License. +risingwave_expr_impl::enable!(); + // tests mod eowc_over_window; mod hash_agg; diff --git a/src/stream/tests/integration_tests/over_window.rs b/src/stream/tests/integration_tests/over_window.rs index 4b7b53aaae31b..f59a92df492f1 100644 --- a/src/stream/tests/integration_tests/over_window.rs +++ b/src/stream/tests/integration_tests/over_window.rs @@ -13,8 +13,8 @@ // limitations under the License. use risingwave_common::session_config::OverWindowCachePolicy; -use risingwave_expr::agg::{AggArgs, AggKind}; -use risingwave_expr::function::window::{ +use risingwave_expr::aggregate::{AggArgs, AggKind}; +use risingwave_expr::window_function::{ Frame, FrameBound, FrameExclusion, WindowFuncCall, WindowFuncKind, }; use risingwave_stream::executor::monitor::StreamingMetrics; diff --git a/src/stream/tests/integration_tests/project_set.rs b/src/stream/tests/integration_tests/project_set.rs index bf1354c25b83b..61a879256108d 100644 --- a/src/stream/tests/integration_tests/project_set.rs +++ b/src/stream/tests/integration_tests/project_set.rs @@ -29,10 +29,10 @@ fn create_executor() -> (MessageSender, BoxedMessageStream) { }; let (tx, source) = MockSource::channel(schema, PkIndices::new()); - let test_expr = build_from_pretty("(add:int8 $0:int8 $1:int8)"); - let test_expr_watermark = build_from_pretty("(add:int8 $0:int8 1:int8)"); - let tf1 = repeat(build_from_pretty("1:int4"), 1); - let tf2 = repeat(build_from_pretty("2:int4"), 2); + let test_expr = build_from_pretty("(add:int8 $0:int8 $1:int8)").into_inner(); + let test_expr_watermark = build_from_pretty("(add:int8 $0:int8 1:int8)").into_inner(); + let tf1 = repeat(build_from_pretty("1:int4").into_inner(), 1); + let tf2 = repeat(build_from_pretty("2:int4").into_inner(), 2); let project_set = Box::new(ProjectSetExecutor::new( ActorContext::create(123), diff --git a/src/test_runner/Cargo.toml b/src/test_runner/Cargo.toml 
index f5ed8b05dc03a..3b9819bd45dad 100644 --- a/src/test_runner/Cargo.toml +++ b/src/test_runner/Cargo.toml @@ -17,6 +17,8 @@ normal = ["workspace-hack"] [dependencies] fail = "0.5" sync-point = { path = "../utils/sync-point" } + +[target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../workspace-hack" } [lints] diff --git a/src/tests/compaction_test/Cargo.toml b/src/tests/compaction_test/Cargo.toml index dd3e5d0a53699..87ad5946b26d5 100644 --- a/src/tests/compaction_test/Cargo.toml +++ b/src/tests/compaction_test/Cargo.toml @@ -27,6 +27,7 @@ risingwave_compactor = { workspace = true } risingwave_hummock_sdk = { workspace = true } risingwave_hummock_test = { workspace = true } risingwave_meta = { workspace = true } +risingwave_meta_node = { workspace = true } risingwave_object_store = { workspace = true } risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } diff --git a/src/tests/compaction_test/src/bin/compaction.rs b/src/tests/compaction_test/src/bin/compaction.rs index 443b79ad625b8..d9ba16f7437b8 100644 --- a/src/tests/compaction_test/src/bin/compaction.rs +++ b/src/tests/compaction_test/src/bin/compaction.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] fn main() { use clap::Parser; diff --git a/src/tests/compaction_test/src/bin/delete_range.rs b/src/tests/compaction_test/src/bin/delete_range.rs index 348a71dc3cce5..592f61a3db4fa 100644 --- a/src/tests/compaction_test/src/bin/delete_range.rs +++ b/src/tests/compaction_test/src/bin/delete_range.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#![cfg_attr(coverage, feature(no_coverage))] +#![cfg_attr(coverage, feature(coverage_attribute))] -#[cfg_attr(coverage, no_coverage)] +#[cfg_attr(coverage, coverage(off))] fn main() { use clap::Parser; diff --git a/src/tests/compaction_test/src/compaction_test_runner.rs b/src/tests/compaction_test/src/compaction_test_runner.rs index db248ad788fba..cf3e35b48c692 100644 --- a/src/tests/compaction_test/src/compaction_test_runner.rs +++ b/src/tests/compaction_test/src/compaction_test_runner.rs @@ -31,6 +31,7 @@ use risingwave_common::config::{ }; use risingwave_common::util::addr::HostAddr; use risingwave_common::util::iter_util::ZipEqFast; +use risingwave_hummock_sdk::key::TableKey; use risingwave_hummock_sdk::{CompactionGroupId, HummockEpoch, FIRST_VERSION_ID}; use risingwave_pb::common::WorkerType; use risingwave_pb::hummock::{HummockVersion, HummockVersionDelta}; @@ -128,7 +129,7 @@ pub async fn compaction_test_main( } pub async fn start_meta_node(listen_addr: String, state_store: String, config_path: String) { - let meta_opts = risingwave_meta::MetaNodeOpts::parse_from([ + let meta_opts = risingwave_meta_node::MetaNodeOpts::parse_from([ "meta-node", "--listen-addr", &listen_addr, @@ -153,7 +154,7 @@ pub async fn start_meta_node(listen_addr: String, state_store: String, config_pa "enable_compaction_deterministic should be set" ); - risingwave_meta::start(meta_opts).await + risingwave_meta_node::start(meta_opts).await } async fn start_compactor_node( @@ -619,10 +620,11 @@ async fn open_hummock_iters( buf.put_u32(table_id); let b = buf.freeze(); let range = ( - Bound::Included(b.clone()), + Bound::Included(b.clone()).map(TableKey), Bound::Excluded(Bytes::from(risingwave_hummock_sdk::key::next_key( b.as_ref(), - ))), + ))) + .map(TableKey), ); for &epoch in snapshots { diff --git a/src/tests/compaction_test/src/delete_range_runner.rs b/src/tests/compaction_test/src/delete_range_runner.rs index 683cff8fac45d..346cf2fe6acf8 100644 --- 
a/src/tests/compaction_test/src/delete_range_runner.rs +++ b/src/tests/compaction_test/src/delete_range_runner.rs @@ -29,6 +29,7 @@ use risingwave_common::catalog::hummock::PROPERTIES_RETENTION_SECOND_KEY; use risingwave_common::catalog::TableId; use risingwave_common::config::{extract_storage_memory_config, load_config, NoOverride, RwConfig}; use risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; +use risingwave_hummock_sdk::key::TableKey; use risingwave_hummock_test::get_notification_client_for_test; use risingwave_hummock_test::local_state_store_test_utils::LocalStateStoreTestExt; use risingwave_meta::hummock::compaction::compaction_config::CompactionConfigBuilder; @@ -36,7 +37,7 @@ use risingwave_meta::hummock::test_utils::setup_compute_env_with_config; use risingwave_meta::hummock::MockHummockMetaClient; use risingwave_object_store::object::object_metrics::ObjectStoreMetrics; use risingwave_object_store::object::parse_remote_object_store; -use risingwave_pb::catalog::PbTable; +use risingwave_pb::catalog::{PbCreateType, PbStreamJobStatus, PbTable}; use risingwave_pb::hummock::{CompactionConfig, CompactionGroupInfo}; use risingwave_pb::meta::SystemParams; use risingwave_rpc_client::HummockMetaClient; @@ -90,7 +91,8 @@ pub fn start_delete_range(opts: CompactionTestOpts) -> Pin anyhow::Result<()> { let config = load_config(&opts.config_path, NoOverride); - let compaction_config = CompactionConfigBuilder::new().build(); + let compaction_config = + CompactionConfigBuilder::with_opt(&config.meta.compaction_config).build(); compaction_test( compaction_config, config, @@ -150,6 +152,8 @@ async fn compaction_test( cardinality: None, created_at_epoch: None, cleaned_by_watermark: false, + stream_job_status: PbStreamJobStatus::Created.into(), + create_type: PbCreateType::Foreground.into(), }; let mut delete_range_table = delete_key_table.clone(); delete_range_table.id = 2; @@ -207,6 +211,7 @@ async fn compaction_test( 0, FileCache::none(), 
FileCache::none(), + None, )); let store = HummockStorage::new( @@ -419,7 +424,7 @@ impl NormalState { async fn get_impl(&self, key: &[u8], ignore_range_tombstone: bool) -> Option { self.storage .get( - Bytes::copy_from_slice(key), + TableKey(Bytes::copy_from_slice(key)), ReadOptions { prefix_hint: None, ignore_range_tombstone, @@ -444,8 +449,8 @@ impl NormalState { .storage .iter( ( - Bound::Included(Bytes::copy_from_slice(left)), - Bound::Excluded(Bytes::copy_from_slice(right)), + Bound::Included(TableKey(Bytes::copy_from_slice(left))), + Bound::Excluded(TableKey(Bytes::copy_from_slice(right))), ), ReadOptions { prefix_hint: None, @@ -476,8 +481,8 @@ impl CheckState for NormalState { self.storage .iter( ( - Bound::Included(Bytes::copy_from_slice(left)), - Bound::Excluded(Bytes::copy_from_slice(right)), + Bound::Included(Bytes::copy_from_slice(left)).map(TableKey), + Bound::Excluded(Bytes::copy_from_slice(right)).map(TableKey), ), ReadOptions { prefix_hint: None, @@ -495,7 +500,7 @@ impl CheckState for NormalState { let mut delete_item = Vec::new(); while let Some(item) = iter.next().await { let (full_key, value) = item.unwrap(); - delete_item.push((full_key.user_key.table_key.0, value)); + delete_item.push((full_key.user_key.table_key, value)); } drop(iter); for (key, value) in delete_item { @@ -505,7 +510,11 @@ impl CheckState for NormalState { fn insert(&mut self, key: &[u8], val: &[u8]) { self.storage - .insert(Bytes::from(key.to_vec()), Bytes::copy_from_slice(val), None) + .insert( + TableKey(Bytes::from(key.to_vec())), + Bytes::copy_from_slice(val), + None, + ) .unwrap(); } @@ -575,21 +584,27 @@ fn run_compactor_thread( tokio::task::JoinHandle<()>, tokio::sync::oneshot::Sender<()>, ) { + let filter_key_extractor_manager = + FilterKeyExtractorManager::RpcFilterKeyExtractorManager(filter_key_extractor_manager); let compactor_context = CompactorContext { storage_opts, sstable_store, compactor_metrics, is_share_buffer_compact: false, compaction_executor: 
Arc::new(CompactionExecutor::new(None)), - filter_key_extractor_manager: FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager, - ), + memory_limiter: MemoryLimiter::unlimit(), task_progress_manager: Default::default(), await_tree_reg: None, running_task_count: Arc::new(AtomicU32::new(0)), }; - start_compactor(compactor_context, meta_client, sstable_object_id_manager) + + start_compactor( + compactor_context, + meta_client, + sstable_object_id_manager, + filter_key_extractor_manager, + ) } #[cfg(test)] diff --git a/src/tests/compaction_test/src/lib.rs b/src/tests/compaction_test/src/lib.rs index bbd1174d0c7fe..0bb1a990eebb7 100644 --- a/src/tests/compaction_test/src/lib.rs +++ b/src/tests/compaction_test/src/lib.rs @@ -23,6 +23,7 @@ #![warn(clippy::no_effect_underscore_binding)] #![warn(clippy::await_holding_lock)] #![deny(rustdoc::broken_intra_doc_links)] +#![feature(bound_map)] mod compaction_test_runner; mod delete_range_runner; diff --git a/src/tests/libpq_test/Cargo.lock b/src/tests/libpq_test/Cargo.lock index 38710822ca506..74beef901a9ea 100644 --- a/src/tests/libpq_test/Cargo.lock +++ b/src/tests/libpq_test/Cargo.lock @@ -96,9 +96,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "cc" @@ -249,9 +249,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = 
"libloading" @@ -288,9 +288,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "log" @@ -402,11 +402,11 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", diff --git a/src/tests/regress/data/schedule b/src/tests/regress/data/schedule index b735b1b6f332e..90fd97b9ffec0 100644 --- a/src/tests/regress/data/schedule +++ b/src/tests/regress/data/schedule @@ -11,3 +11,4 @@ test: boolean varchar text int2 int4 int8 float4 float8 comments test: strings date time timestamp interval test: case arrays test: jsonb +test: regex diff --git a/src/tests/regress/data/sql/regex.sql b/src/tests/regress/data/sql/regex.sql index b03a8d9ac220b..280748746f407 100644 --- a/src/tests/regress/data/sql/regex.sql +++ b/src/tests/regress/data/sql/regex.sql @@ -21,16 +21,16 @@ select 'abc abd abc' ~ '^(.+)( \1)+$' as f; select 'abc abc abd' ~ '^(.+)( \1)+$' as f; -- Test some cases that crashed in 9.2beta1 due to pmatch[] array overrun -select substring('asd TO foo' from ' TO (([a-z0-9._]+|"([^"]+|"")+")+)'); -select substring('a' from '((a))+'); -select substring('a' from '((a)+)'); +--@ select substring('asd TO foo' from ' TO (([a-z0-9._]+|"([^"]+|"")+")+)'); +--@ select substring('a' from '((a))+'); +--@ select substring('a' from '((a)+)'); -- Test regexp_match() select regexp_match('abc', ''); select 
regexp_match('abc', 'bc'); select regexp_match('abc', 'd') is null; select regexp_match('abc', '(B)(c)', 'i'); -select regexp_match('abc', 'Bd', 'ig'); -- error +--@ select regexp_match('abc', 'Bd', 'ig'); -- error -- Test lookahead constraints select regexp_matches('ab', 'a(?=b)b*'); @@ -47,7 +47,7 @@ select regexp_matches('abb', '(?<=a)b*'); select regexp_matches('a', 'a(?<=a)b*'); select regexp_matches('abc', 'a(?<=a)b*(?<=b)c*'); select regexp_matches('ab', 'a(?<=a)b*(?<=b)c*'); -select regexp_matches('ab', 'a*(? { vec![ "SET RW_IMPLICIT_FLUSH TO true;\n", - "SET CREATE_COMPACTION_GROUP_FOR_MV TO true;\n", "SET QUERY_MODE TO LOCAL;\n", ] } diff --git a/src/tests/simulation/Cargo.toml b/src/tests/simulation/Cargo.toml index 594f06b3b61c3..b81be65edae42 100644 --- a/src/tests/simulation/Cargo.toml +++ b/src/tests/simulation/Cargo.toml @@ -14,7 +14,7 @@ normal = ["serde"] anyhow = "1.0" async-trait = "0.1" aws-sdk-s3 = { version = "0.2", package = "madsim-aws-sdk-s3" } -cfg-or-panic = "0.1" +cfg-or-panic = "0.2" clap = { version = "4", features = ["derive"] } console = "0.15" etcd-client = { workspace = true } @@ -35,22 +35,22 @@ risingwave_compute = { workspace = true } risingwave_connector = { workspace = true } risingwave_ctl = { workspace = true } risingwave_e2e_extended_mode_test = { path = "../e2e_extended_mode" } +risingwave_expr_impl = { workspace = true } risingwave_frontend = { workspace = true } -risingwave_meta = { workspace = true } +risingwave_meta_node = { workspace = true } risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } risingwave_sqlparser = { workspace = true } risingwave_sqlsmith = { workspace = true } serde = "1.0.188" serde_derive = "1.0.188" -serde_json = "1.0.106" -sqllogictest = "0.15.3" +serde_json = "1.0.107" +sqllogictest = "0.17.0" tempfile = "3" -tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git", features = [ - "profiling", -], rev = "b7f9f3" } +tikv-jemallocator = { 
workspace = true } tokio = { version = "0.2.23", package = "madsim-tokio" } tokio-postgres = "0.7" +tokio-stream = "0.1" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } diff --git a/src/tests/simulation/src/client.rs b/src/tests/simulation/src/client.rs index 089d67bceeeab..bed58d99e3f29 100644 --- a/src/tests/simulation/src/client.rs +++ b/src/tests/simulation/src/client.rs @@ -223,4 +223,10 @@ impl sqllogictest::AsyncDB for RisingWave { async fn sleep(dur: Duration) { tokio::time::sleep(dur).await } + + async fn run_command( + _command: std::process::Command, + ) -> std::io::Result { + unimplemented!("spawning process is not supported in simulation mode") + } } diff --git a/src/tests/simulation/src/cluster.rs b/src/tests/simulation/src/cluster.rs index f375eeac4cc85..6cc6168513cd4 100644 --- a/src/tests/simulation/src/cluster.rs +++ b/src/tests/simulation/src/cluster.rs @@ -255,7 +255,7 @@ impl Cluster { // meta node for i in 1..=conf.meta_nodes { - let opts = risingwave_meta::MetaNodeOpts::parse_from([ + let opts = risingwave_meta_node::MetaNodeOpts::parse_from([ "meta-node", "--config-path", conf.config_path.as_str(), @@ -276,7 +276,7 @@ impl Cluster { .create_node() .name(format!("meta-{i}")) .ip([192, 168, 1, i as u8].into()) - .init(move || risingwave_meta::start(opts.clone())) + .init(move || risingwave_meta_node::start(opts.clone())) .build(); } @@ -655,6 +655,12 @@ impl Session { self.query_tx.send((sql.into(), tx)).await?; rx.await? } + + /// Run `FLUSH` on the session. + pub async fn flush(&mut self) -> Result<()> { + self.run("FLUSH").await?; + Ok(()) + } } /// Options for killing nodes. 
diff --git a/src/tests/simulation/src/lib.rs b/src/tests/simulation/src/lib.rs index 68c1d0446944d..6cf880d7d66fb 100644 --- a/src/tests/simulation/src/lib.rs +++ b/src/tests/simulation/src/lib.rs @@ -23,3 +23,5 @@ pub mod kafka; pub mod nexmark; pub mod slt; pub mod utils; + +risingwave_expr_impl::enable!(); diff --git a/src/tests/simulation/tests/integration_tests/recovery/backfill.rs b/src/tests/simulation/tests/integration_tests/recovery/backfill.rs index e907528826864..896f65506554e 100644 --- a/src/tests/simulation/tests/integration_tests/recovery/backfill.rs +++ b/src/tests/simulation/tests/integration_tests/recovery/backfill.rs @@ -30,7 +30,7 @@ const SHOW_INTERNAL_TABLES: &str = "SHOW INTERNAL TABLES;"; static EXPECTED_NO_BACKFILL: LazyLock = LazyLock::new(|| { (0..=255) - .map(|vnode| format!("{} NULL t", vnode)) + .map(|vnode| format!("{} NULL t 0", vnode)) .join("\n") }); diff --git a/src/tests/simulation/tests/integration_tests/recovery/background_ddl.rs b/src/tests/simulation/tests/integration_tests/recovery/background_ddl.rs new file mode 100644 index 0000000000000..89df82d4c21a0 --- /dev/null +++ b/src/tests/simulation/tests/integration_tests/recovery/background_ddl.rs @@ -0,0 +1,99 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::time::Duration; + +use anyhow::Result; +use risingwave_simulation::cluster::{Cluster, Configuration, KillOpts}; +use risingwave_simulation::utils::AssertResult; +use tokio::time::sleep; + +async fn kill_cn_and_wait_recover(cluster: &Cluster) { + // Kill it again + for _ in 0..5 { + cluster + .kill_node(&KillOpts { + kill_rate: 1.0, + kill_meta: false, + kill_frontend: false, + kill_compute: true, + kill_compactor: false, + restart_delay_secs: 1, + }) + .await; + sleep(Duration::from_secs(2)).await; + } + sleep(Duration::from_secs(10)).await; +} + +async fn kill_and_wait_recover(cluster: &Cluster) { + // Kill it again + for _ in 0..5 { + sleep(Duration::from_secs(2)).await; + cluster.kill_node(&KillOpts::ALL).await; + } + sleep(Duration::from_secs(20)).await; +} + +#[tokio::test] +async fn test_background_mv_barrier_recovery() -> Result<()> { + let mut cluster = Cluster::start(Configuration::for_backfill()).await?; + let mut session = cluster.start_session(); + + session.run("CREATE TABLE t1 (v1 int);").await?; + session + .run("INSERT INTO t1 select * from generate_series(1, 400000)") + .await?; + session.run("flush").await?; + session.run("SET BACKGROUND_DDL=true;").await?; + session + .run("create materialized view m1 as select * from t1;") + .await?; + + // If the CN is killed before first barrier pass for the MV, the MV will be dropped. + // This is because it's table fragments will NOT be committed until first barrier pass. + sleep(Duration::from_secs(5)).await; + kill_cn_and_wait_recover(&cluster).await; + + // Send some upstream updates. + cluster + .run("INSERT INTO t1 select * from generate_series(1, 100000);") + .await?; + cluster.run("flush;").await?; + + kill_cn_and_wait_recover(&cluster).await; + + kill_and_wait_recover(&cluster).await; + + // Send some upstream updates. + cluster + .run("INSERT INTO t1 select * from generate_series(1, 100000);") + .await?; + cluster.run("flush;").await?; + + // Now just wait for it to complete. 
+ + sleep(Duration::from_secs(10)).await; + + // Make sure after finished, we should have 5000_000 rows. + session + .run("SELECT COUNT(v1) FROM m1") + .await? + .assert_result_eq("600000"); + + session.run("DROP MATERIALIZED VIEW m1").await?; + session.run("DROP TABLE t1").await?; + + Ok(()) +} diff --git a/src/tests/simulation/tests/integration_tests/recovery/mod.rs b/src/tests/simulation/tests/integration_tests/recovery/mod.rs index 565487e8d7dbd..2430daad760a1 100644 --- a/src/tests/simulation/tests/integration_tests/recovery/mod.rs +++ b/src/tests/simulation/tests/integration_tests/recovery/mod.rs @@ -13,5 +13,6 @@ // limitations under the License. mod backfill; +mod background_ddl; mod nexmark_recovery; mod pause_on_bootstrap; diff --git a/src/tests/simulation/tests/integration_tests/recovery/pause_on_bootstrap.rs b/src/tests/simulation/tests/integration_tests/recovery/pause_on_bootstrap.rs index d0288e6931e88..0eea61da67dfb 100644 --- a/src/tests/simulation/tests/integration_tests/recovery/pause_on_bootstrap.rs +++ b/src/tests/simulation/tests/integration_tests/recovery/pause_on_bootstrap.rs @@ -15,29 +15,43 @@ use std::time::Duration; use anyhow::Result; -use risingwave_simulation::cluster::Configuration; +use risingwave_simulation::cluster::{Cluster, Configuration}; use risingwave_simulation::nexmark::NexmarkCluster; use risingwave_simulation::utils::AssertResult; use tokio::time::{sleep, timeout}; -const CREATE_TABLE: &str = "CREATE TABLE t (v int)"; -const INSERT_INTO_TABLE: &str = "INSERT INTO t VALUES (1)"; -const SELECT_COUNT_TABLE: &str = "SELECT COUNT(*) FROM t"; - -const CREATE: &str = "CREATE MATERIALIZED VIEW count_bid as SELECT COUNT(*) FROM bid"; -const SELECT: &str = "SELECT * FROM count_bid"; - -const CREATE_2: &str = "CREATE MATERIALIZED VIEW count_auction as SELECT COUNT(*) FROM auction"; -const SELECT_2: &str = "SELECT * FROM count_auction"; - const SET_PARAMETER: &str = "ALTER SYSTEM SET pause_on_next_bootstrap TO true"; +#[derive(Clone, 
Copy)] enum ResumeBy { Risectl, Restart, } +impl ResumeBy { + async fn resume(self, cluster: &mut Cluster) -> Result<()> { + match self { + ResumeBy::Risectl => cluster.resume().await?, + ResumeBy::Restart => cluster.kill_nodes(["meta-1"], 0).await, + }; + Ok(()) + } +} + async fn test_impl(resume_by: ResumeBy) -> Result<()> { + const CREATE_TABLE: &str = "CREATE TABLE t (v int)"; + const INSERT_INTO_TABLE: &str = "INSERT INTO t VALUES (1)"; + const SELECT_COUNT_TABLE: &str = "SELECT COUNT(*) FROM t"; + + const CREATE: &str = "CREATE MATERIALIZED VIEW count_bid as SELECT COUNT(*) FROM bid"; + const SELECT: &str = "SELECT * FROM count_bid"; + + const CREATE_2: &str = "CREATE MATERIALIZED VIEW count_auction as SELECT COUNT(*) FROM auction"; + const SELECT_2: &str = "SELECT * FROM count_auction"; + + const CREATE_VALUES: &str = "CREATE MATERIALIZED VIEW values as VALUES (1), (2), (3)"; + const SELECT_VALUES: &str = "SELECT count(*) FROM values"; + let mut cluster = NexmarkCluster::new( Configuration { meta_nodes: 1, @@ -77,18 +91,21 @@ async fn test_impl(resume_by: ResumeBy) -> Result<()> { // New streaming jobs should also start from paused. cluster.run(CREATE_2).await?; sleep(Duration::from_secs(10)).await; - cluster.run(SELECT_2).await?.assert_result_eq("0"); // even there's no data from source, the + cluster.run(SELECT_2).await?.assert_result_eq("0"); // even there's no data from source, the aggregation // result will be 0 instead of empty or NULL + // `VALUES` should also be paused. + tokio::time::timeout(Duration::from_secs(10), cluster.run(CREATE_VALUES)) + .await + .expect_err("`VALUES` should be paused so creation should never complete"); + // DML on tables should be blocked. 
let result = timeout(Duration::from_secs(10), cluster.run(INSERT_INTO_TABLE)).await; assert!(result.is_err()); cluster.run(SELECT_COUNT_TABLE).await?.assert_result_eq("0"); - match resume_by { - ResumeBy::Risectl => cluster.resume().await?, - ResumeBy::Restart => cluster.kill_nodes(["meta-1"], 0).await, - } + // Resume the cluster. + resume_by.resume(&mut cluster).await?; sleep(Duration::from_secs(10)).await; // The source should be resumed. @@ -100,17 +117,22 @@ async fn test_impl(resume_by: ResumeBy) -> Result<()> { { let mut session = cluster.start_session(); - session.run("FLUSH").await?; + session.flush().await?; let count: i64 = session.run(SELECT_COUNT_TABLE).await?.parse().unwrap(); session.run(INSERT_INTO_TABLE).await?; - session.run("FLUSH").await?; + session.flush().await?; session .run(SELECT_COUNT_TABLE) .await? .assert_result_eq(format!("{}", count + 1)); } + if let ResumeBy::Risectl = resume_by { + // `VALUES` should be successfully created + cluster.run(SELECT_VALUES).await?.assert_result_eq("3"); + } + Ok(()) } @@ -123,3 +145,64 @@ async fn test_pause_on_bootstrap_resume_by_risectl() -> Result<()> { async fn test_pause_on_bootstrap_resume_by_restart() -> Result<()> { test_impl(ResumeBy::Restart).await } + +// The idea is similar to `e2e_test/batch/transaction/now.slt`. 
+async fn test_temporal_filter(resume_by: ResumeBy) -> Result<()> { + const CREATE_TABLE: &str = "create table t (ts timestamp)"; + const CREATE_TEMPORAL_FILTER: &str = + "create materialized view mv as select count(*) from t where ts at time zone 'utc' >= now()"; + const INSERT_TIMESTAMPS: &str = " + insert into t select * from generate_series( + now() at time zone 'utc' - interval '10' second, + now() at time zone 'utc' + interval '20' second, + interval '1' second / 20 + ); + "; + const SELECT: &str = "select * from mv"; + + let mut cluster = Cluster::start(Configuration { + meta_nodes: 1, + ..Configuration::for_scale() + }) + .await?; + + cluster.run(SET_PARAMETER).await?; + + { + let mut session = cluster.start_session(); + session.run(CREATE_TABLE).await?; + session.run(CREATE_TEMPORAL_FILTER).await?; + session.run(INSERT_TIMESTAMPS).await?; + session.flush().await?; + }; + + // Kill the meta node and wait for the service to recover. + cluster.kill_nodes(["meta-1"], 0).await; + sleep(Duration::from_secs(10)).await; + + let count: i32 = cluster.run(SELECT).await?.parse()?; + assert_ne!(count, 0, "the following tests are meaningless"); + + sleep(Duration::from_secs(10)).await; + let new_count: i32 = cluster.run(SELECT).await?.parse()?; + assert_eq!(count, new_count, "temporal filter should have been paused"); + + // Resume the cluster. 
+ resume_by.resume(&mut cluster).await?; + sleep(Duration::from_secs(40)).await; // 40 seconds is enough for all timestamps to be expired + + let count: i32 = cluster.run(SELECT).await?.parse()?; + assert_eq!(count, 0, "temporal filter should have been resumed"); + + Ok(()) +} + +#[tokio::test] +async fn test_pause_on_bootstrap_temporal_filter_resume_by_risectl() -> Result<()> { + test_temporal_filter(ResumeBy::Risectl).await +} + +#[tokio::test] +async fn test_pause_on_bootstrap_temporal_filter_resume_by_restart() -> Result<()> { + test_temporal_filter(ResumeBy::Restart).await +} diff --git a/src/tests/simulation/tests/integration_tests/scale/cascade_materialized_view.rs b/src/tests/simulation/tests/integration_tests/scale/cascade_materialized_view.rs index c05e52c927424..776692b2fab90 100644 --- a/src/tests/simulation/tests/integration_tests/scale/cascade_materialized_view.rs +++ b/src/tests/simulation/tests/integration_tests/scale/cascade_materialized_view.rs @@ -25,7 +25,7 @@ const ROOT_TABLE_CREATE: &str = "create table t1 (v1 int);"; const MV1: &str = "create materialized view m1 as select * from t1 where v1 > 5;"; const MV2: &str = "create materialized view m2 as select * from t1 where v1 > 10;"; const MV3: &str = "create materialized view m3 as select * from m2 where v1 < 15;"; -const MV4: &str = "create materialized view m4 as select m1.v1 as m1v, m3.v1 as m3v from m1 join m3 on m1.v1 = m3.v1;"; +const MV4: &str = "create materialized view m4 as select m1.v1 as m1v, m3.v1 as m3v from m1 join m3 on m1.v1 = m3.v1 limit 100;"; const MV5: &str = "create materialized view m5 as select * from m4;"; #[tokio::test] @@ -40,6 +40,7 @@ async fn test_simple_cascade_materialized_view() -> Result<()> { .locate_one_fragment([ identity_contains("materialize"), no_identity_contains("chain"), + no_identity_contains("topn"), no_identity_contains("hashjoin"), ]) .await?; @@ -129,6 +130,7 @@ async fn test_diamond_cascade_materialized_view() -> Result<()> { 
.locate_one_fragment([ identity_contains("materialize"), no_identity_contains("chain"), + no_identity_contains("topn"), no_identity_contains("hashjoin"), ]) .await?; diff --git a/src/tests/simulation/tests/integration_tests/scale/plan.rs b/src/tests/simulation/tests/integration_tests/scale/plan.rs index c7244dc826b42..8b62a58998a3f 100644 --- a/src/tests/simulation/tests/integration_tests/scale/plan.rs +++ b/src/tests/simulation/tests/integration_tests/scale/plan.rs @@ -39,10 +39,7 @@ async fn test_resize_normal() -> Result<()> { .await?; let join_fragment = cluster - .locate_one_fragment([ - identity_contains("hashJoin"), - identity_contains("materialize"), - ]) + .locate_one_fragment([identity_contains("hashJoin")]) .await?; let join_fragment_id = join_fragment.inner.fragment_id; @@ -270,7 +267,7 @@ async fn test_resize_no_shuffle() -> Result<()> { session .run( "create materialized view mv7 as select mv1.v as mv1v, mv5.v as mv5v from mv1 -join mv5 on mv1.v = mv5.v;", +join mv5 on mv1.v = mv5.v limit 1;", ) .await?; @@ -316,6 +313,7 @@ join mv5 on mv1.v = mv5.v;", let top_materialize_fragment = cluster .locate_one_fragment([ identity_contains("materialize"), + no_identity_contains("topn"), no_identity_contains("chain"), no_identity_contains("hashJoin"), ]) diff --git a/src/tests/simulation/tests/integration_tests/sink/basic.rs b/src/tests/simulation/tests/integration_tests/sink/basic.rs index a12bc3643b542..bceb45a8a2389 100644 --- a/src/tests/simulation/tests/integration_tests/sink/basic.rs +++ b/src/tests/simulation/tests/integration_tests/sink/basic.rs @@ -13,6 +13,7 @@ // limitations under the License. 
use std::io::Write; +use std::iter::once; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; @@ -20,15 +21,24 @@ use std::time::Duration; use anyhow::Result; use async_trait::async_trait; +use futures::stream::select_all; +use futures::StreamExt; use itertools::Itertools; use rand::prelude::SliceRandom; -use risingwave_common::array::StreamChunk; +use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::buffer::Bitmap; +use risingwave_common::types::{DataType, ScalarImpl}; +use risingwave_common::util::chunk_coalesce::DataChunkBuilder; use risingwave_connector::sink::boxed::{BoxCoordinator, BoxWriter}; use risingwave_connector::sink::test_sink::registry_build_sink; -use risingwave_connector::sink::{Sink, SinkWriter, SinkWriterParam}; +use risingwave_connector::sink::writer::SinkWriter; +use risingwave_connector::sink::{Sink, SinkWriterParam}; +use risingwave_connector::source::test_source::{registry_test_source, BoxSource, TestSourceSplit}; +use risingwave_connector::source::StreamChunkWithState; use risingwave_simulation::cluster::{Cluster, ConfigPath, Configuration}; +use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; use tokio::time::sleep; +use tokio_stream::wrappers::UnboundedReceiverStream; struct TestWriter { row_counter: Arc, @@ -65,33 +75,19 @@ impl Drop for TestWriter { } } -struct TestSink { - row_counter: Arc, - parallelism_counter: Arc, -} - -#[async_trait] -impl Sink for TestSink { - type Coordinator = BoxCoordinator; - type Writer = BoxWriter<()>; - - async fn validate( - &self, - _client: Option, - ) -> risingwave_connector::sink::Result<()> { - Ok(()) - } - - async fn new_writer( - &self, - _writer_param: SinkWriterParam, - ) -> risingwave_connector::sink::Result { - self.parallelism_counter.fetch_add(1, Relaxed); - Ok(Box::new(TestWriter { - parallelism_counter: self.parallelism_counter.clone(), - row_counter: self.row_counter.clone(), - })) +fn build_stream_chunk(row_iter: 
impl Iterator) -> StreamChunk { + let mut builder = DataChunkBuilder::new(vec![DataType::Int32, DataType::Varchar], 100000); + for (id, name) in row_iter { + assert!(builder + .append_one_row([ + Some(ScalarImpl::Int32(id)), + Some(ScalarImpl::Utf8(name.into())), + ]) + .is_none()); } + let chunk = builder.consume_all().unwrap(); + let ops = (0..chunk.cardinality()).map(|_| Op::Insert).collect_vec(); + StreamChunk::from_parts(ops, chunk) } #[tokio::test] @@ -121,40 +117,67 @@ async fn test_sink_basic() -> Result<()> { let _sink_guard = registry_build_sink({ let row_counter = row_counter.clone(); let parallelism_counter = parallelism_counter.clone(); - move |_param| { - Ok(Box::new(TestSink { + move |_, _| { + parallelism_counter.fetch_add(1, Relaxed); + Box::new(TestWriter { row_counter: row_counter.clone(), parallelism_counter: parallelism_counter.clone(), - })) + }) } }); + let source_parallelism = 12; + let mut txs = Vec::new(); + let mut rxs = Vec::new(); + for _ in 0..source_parallelism { + let (tx, rx): (_, UnboundedReceiver) = unbounded_channel(); + txs.push(tx); + rxs.push(Some(rx)); + } + + let _source_guard = registry_test_source(BoxSource::new( + move |_, _| { + Ok((0..source_parallelism) + .map(|i: usize| TestSourceSplit { + id: format!("{}", i).as_str().into(), + properties: Default::default(), + offset: "".to_string(), + }) + .collect_vec()) + }, + move |_, splits, _, _, _| { + select_all(splits.into_iter().map(|split| { + let id: usize = split.id.parse().unwrap(); + let rx = rxs[id].take().unwrap(); + UnboundedReceiverStream::new(rx).map(|chunk| Ok(StreamChunkWithState::from(chunk))) + })) + .boxed() + }, + )); + let mut session = cluster.start_session(); session.run("set streaming_parallelism = 6").await?; session.run("set sink_decouple = false").await?; session - .run("create table test_table (id int, name varchar)") + .run("create table test_table (id int primary key, name varchar) with (connector = 'test') FORMAT PLAIN ENCODE JSON") .await?; 
session .run("create sink test_sink from test_table with (connector = 'test')") .await?; let mut count = 0; - let mut id_list = (0..100000).collect_vec(); + let mut id_list: Vec = (0..100000).collect_vec(); id_list.shuffle(&mut rand::thread_rng()); let flush_freq = 50; - for id in &id_list[0..1000] { - session - .run(format!( - "insert into test_table values ({}, 'name-{}')", - id, id - )) - .await?; + for id in &id_list[0..10000] { + let chunk = build_stream_chunk(once((*id as i32, format!("name-{}", id)))); + txs[id % source_parallelism].send(chunk).unwrap(); count += 1; if count % flush_freq == 0 { - session.run("flush").await?; + sleep(Duration::from_millis(10)).await; } } + sleep(Duration::from_millis(10000)).await; assert_eq!(6, parallelism_counter.load(Relaxed)); assert_eq!(count, row_counter.load(Relaxed)); @@ -193,20 +216,50 @@ async fn test_sink_decouple_basic() -> Result<()> { let _sink_guard = registry_build_sink({ let row_counter = row_counter.clone(); let parallelism_counter = parallelism_counter.clone(); - move |_param| { - Ok(Box::new(TestSink { + move |_, _| { + parallelism_counter.fetch_add(1, Relaxed); + Box::new(TestWriter { row_counter: row_counter.clone(), parallelism_counter: parallelism_counter.clone(), - })) + }) } }); + let source_parallelism = 12; + let mut txs = Vec::new(); + let mut rxs = Vec::new(); + for _ in 0..source_parallelism { + let (tx, rx): (_, UnboundedReceiver) = unbounded_channel(); + txs.push(tx); + rxs.push(Some(rx)); + } + + let _source_guard = registry_test_source(BoxSource::new( + move |_, _| { + Ok((0..source_parallelism) + .map(|i: usize| TestSourceSplit { + id: format!("{}", i).as_str().into(), + properties: Default::default(), + offset: "".to_string(), + }) + .collect_vec()) + }, + move |_, splits, _, _, _| { + select_all(splits.into_iter().map(|split| { + let id: usize = split.id.parse().unwrap(); + let rx = rxs[id].take().unwrap(); + UnboundedReceiverStream::new(rx).map(|chunk| 
Ok(StreamChunkWithState::from(chunk))) + })) + .boxed() + }, + )); + let mut session = cluster.start_session(); session.run("set streaming_parallelism = 6").await?; session.run("set sink_decouple = true").await?; session - .run("create table test_table (id int, name varchar)") + .run("create table test_table (id int primary key, name varchar) with (connector = 'test') FORMAT PLAIN ENCODE JSON") .await?; session .run("create sink test_sink from test_table with (connector = 'test')") @@ -217,16 +270,12 @@ async fn test_sink_decouple_basic() -> Result<()> { let mut id_list = (0..100000).collect_vec(); id_list.shuffle(&mut rand::thread_rng()); let flush_freq = 50; - for id in &id_list[0..1000] { - session - .run(format!( - "insert into test_table values ({}, 'name-{}')", - id, id - )) - .await?; + for id in &id_list[0..10000] { + let chunk = build_stream_chunk(once((*id as i32, format!("name-{}", id)))); + txs[id % source_parallelism].send(chunk).unwrap(); count += 1; if count % flush_freq == 0 { - session.run("flush").await?; + sleep(Duration::from_millis(10)).await; } } @@ -242,3 +291,81 @@ async fn test_sink_decouple_basic() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_sink_decouple_blackhole() -> Result<()> { + let config_path = { + let mut file = tempfile::NamedTempFile::new().expect("failed to create temp config file"); + file.write_all(include_bytes!("../../../../../config/ci-sim.toml")) + .expect("failed to write config file"); + file.into_temp_path() + }; + + let mut cluster = Cluster::start(Configuration { + config_path: ConfigPath::Temp(config_path.into()), + frontend_nodes: 1, + compute_nodes: 3, + meta_nodes: 1, + compactor_nodes: 1, + compute_node_cores: 2, + etcd_timeout_rate: 0.0, + etcd_data_path: None, + }) + .await?; + + let source_parallelism = 12; + let mut txs = Vec::new(); + let mut rxs = Vec::new(); + for _ in 0..source_parallelism { + let (tx, rx): (_, UnboundedReceiver) = unbounded_channel(); + txs.push(tx); + rxs.push(Some(rx)); 
+ } + + let _source_guard = registry_test_source(BoxSource::new( + move |_, _| { + Ok((0..source_parallelism) + .map(|i: usize| TestSourceSplit { + id: format!("{}", i).as_str().into(), + properties: Default::default(), + offset: "".to_string(), + }) + .collect_vec()) + }, + move |_, splits, _, _, _| { + select_all(splits.into_iter().map(|split| { + let id: usize = split.id.parse().unwrap(); + let rx = rxs[id].take().unwrap(); + UnboundedReceiverStream::new(rx).map(|chunk| Ok(StreamChunkWithState::from(chunk))) + })) + .boxed() + }, + )); + + let mut session = cluster.start_session(); + + session.run("set streaming_parallelism = 6").await?; + session.run("set sink_decouple = true").await?; + session + .run("create table test_table (id int primary key, name varchar) with (connector = 'test') FORMAT PLAIN ENCODE JSON") + .await?; + session + .run("create sink test_sink from test_table with (connector = 'blackhole')") + .await?; + + let mut count = 0; + let mut id_list = (0..100000).collect_vec(); + id_list.shuffle(&mut rand::thread_rng()); + let flush_freq = 50; + for id in &id_list[0..10000] { + let chunk = build_stream_chunk(once((*id as i32, format!("name-{}", id)))); + txs[id % source_parallelism].send(chunk).unwrap(); + count += 1; + if count % flush_freq == 0 { + sleep(Duration::from_millis(10)).await; + } + } + + session.run("drop sink test_sink").await?; + Ok(()) +} diff --git a/src/tests/sqlsmith/Cargo.toml b/src/tests/sqlsmith/Cargo.toml index 57acbc8d94cca..402c6119cd1cf 100644 --- a/src/tests/sqlsmith/Cargo.toml +++ b/src/tests/sqlsmith/Cargo.toml @@ -23,10 +23,11 @@ rand_chacha = { version = "0.3.1" } regex = "1" risingwave_common = { workspace = true } risingwave_expr = { workspace = true } +risingwave_expr_impl = { workspace = true } risingwave_frontend = { workspace = true } risingwave_pb = { workspace = true } risingwave_sqlparser = { workspace = true } -similar = "2.2.1" +similar = "2.3.0" tokio = { version = "0.2", package = "madsim-tokio" } 
tokio-postgres = "0.7" tracing = "0.1" diff --git a/src/tests/sqlsmith/src/lib.rs b/src/tests/sqlsmith/src/lib.rs index 2f7e1ce5eb14b..ebb0682a7aaaf 100644 --- a/src/tests/sqlsmith/src/lib.rs +++ b/src/tests/sqlsmith/src/lib.rs @@ -17,6 +17,8 @@ #![feature(lazy_cell)] #![feature(box_patterns)] +risingwave_expr_impl::enable!(); + use std::collections::{HashMap, HashSet}; use anyhow::{bail, Result}; diff --git a/src/tests/sqlsmith/src/sql_gen/agg.rs b/src/tests/sqlsmith/src/sql_gen/agg.rs index 6c1bd2bd26dcf..c42eb6c7b0ffc 100644 --- a/src/tests/sqlsmith/src/sql_gen/agg.rs +++ b/src/tests/sqlsmith/src/sql_gen/agg.rs @@ -15,7 +15,8 @@ use rand::seq::SliceRandom; use rand::Rng; use risingwave_common::types::DataType; -use risingwave_expr::agg::AggKind; +use risingwave_expr::aggregate::AggKind; +use risingwave_expr::sig::SigDataType; use risingwave_sqlparser::ast::{ Expr, Function, FunctionArg, FunctionArgExpr, Ident, ObjectName, OrderByExpr, }; @@ -30,13 +31,12 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { Some(funcs) => funcs, }; let func = funcs.choose(&mut self.rng).unwrap(); - if matches!( - (func.func, func.inputs_type.as_slice()), - ( - AggKind::Min | AggKind::Max, - [DataType::Boolean | DataType::Jsonb] + if matches!(func.name.as_aggregate(), AggKind::Min | AggKind::Max) + && matches!( + func.ret_type, + SigDataType::Exact(DataType::Boolean | DataType::Jsonb) ) - ) { + { return self.gen_simple_scalar(ret); } @@ -45,13 +45,13 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { let exprs: Vec = func .inputs_type .iter() - .map(|t| self.gen_expr(t, context)) + .map(|t| self.gen_expr(t.as_exact(), context)) .collect(); // DISTINCT now only works with agg kinds except `ApproxCountDistinct`, and with at least // one argument and only the first being non-constant. See `Binder::bind_normal_agg` // for more details. 
- let distinct_allowed = func.func != AggKind::ApproxCountDistinct + let distinct_allowed = func.name.as_aggregate() != AggKind::ApproxCountDistinct && !exprs.is_empty() && exprs.iter().skip(1).all(|e| matches!(e, Expr::Value(_))); let distinct = distinct_allowed && self.flip_coin(); @@ -79,7 +79,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { } else { vec![] }; - self.make_agg_expr(func.func, &exprs, distinct, filter, order_by) + self.make_agg_expr(func.name.as_aggregate(), &exprs, distinct, filter, order_by) .unwrap_or_else(|| self.gen_simple_scalar(ret)) } diff --git a/src/tests/sqlsmith/src/sql_gen/expr.rs b/src/tests/sqlsmith/src/sql_gen/expr.rs index f9772c97d4b5c..9999dcd9ea641 100644 --- a/src/tests/sqlsmith/src/sql_gen/expr.rs +++ b/src/tests/sqlsmith/src/sql_gen/expr.rs @@ -16,7 +16,8 @@ use itertools::Itertools; use rand::seq::SliceRandom; use rand::Rng; use risingwave_common::types::{DataType, DataTypeName, StructType}; -use risingwave_frontend::expr::{agg_func_sigs, cast_sigs, func_sigs}; +use risingwave_expr::sig::cast::cast_sigs; +use risingwave_expr::sig::FUNCTION_REGISTRY; use risingwave_sqlparser::ast::{Expr, Ident, OrderByExpr, Value}; use crate::sql_gen::types::data_type_to_ast_data_type; @@ -302,29 +303,25 @@ pub(crate) fn sql_null() -> Expr { // Add variadic function signatures. Can add these functions // to a FUNC_TABLE too. 
pub fn print_function_table() -> String { - let func_str = func_sigs() + let func_str = FUNCTION_REGISTRY + .iter_scalars() .map(|sign| { format!( - "{:?}({}) -> {:?}", - sign.func, - sign.inputs_type - .iter() - .map(|arg| format!("{:?}", arg)) - .join(", "), + "{}({}) -> {}", + sign.name, + sign.inputs_type.iter().format(", "), sign.ret_type, ) }) .join("\n"); - let agg_func_str = agg_func_sigs() + let agg_func_str = FUNCTION_REGISTRY + .iter_aggregates() .map(|sign| { format!( - "{:?}({}) -> {:?}", - sign.func, - sign.inputs_type - .iter() - .map(|arg| format!("{:?}", arg)) - .join(", "), + "{}({}) -> {}", + sign.name, + sign.inputs_type.iter().format(", "), sign.ret_type, ) }) diff --git a/src/tests/sqlsmith/src/sql_gen/functions.rs b/src/tests/sqlsmith/src/sql_gen/functions.rs index 6af491bd8a64d..01cbb0604d262 100644 --- a/src/tests/sqlsmith/src/sql_gen/functions.rs +++ b/src/tests/sqlsmith/src/sql_gen/functions.rs @@ -49,6 +49,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { 4 => self.gen_overlay(context), _ => unreachable!(), }, + T::Bytea => self.gen_decode(context), _ => match self.rng.gen_bool(0.5) { true => self.gen_case(ret, context), false => self.gen_coalesce(ret, context), @@ -121,36 +122,46 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { .collect() } + fn gen_decode(&mut self, context: SqlGeneratorContext) -> Expr { + let input_string = self.gen_expr(&DataType::Varchar, context); + let encoding = &["base64", "hex", "escape"].choose(&mut self.rng).unwrap(); + let args = vec![ + input_string, + Expr::Value(Value::SingleQuotedString(encoding.to_string())), + ]; + Expr::Function(make_simple_func("decode", &args)) + } + fn gen_fixed_func(&mut self, ret: &DataType, context: SqlGeneratorContext) -> Expr { let funcs = match FUNC_TABLE.get(ret) { None => return self.gen_simple_scalar(ret), Some(funcs) => funcs, }; let func = funcs.choose(&mut self.rng).unwrap(); - let can_implicit_cast = INVARIANT_FUNC_SET.contains(&func.func); + let can_implicit_cast = 
INVARIANT_FUNC_SET.contains(&func.name.as_scalar()); let exprs: Vec = func .inputs_type .iter() .map(|t| { - if let Some(from_tys) = IMPLICIT_CAST_TABLE.get(t) + if let Some(from_tys) = IMPLICIT_CAST_TABLE.get(t.as_exact()) && can_implicit_cast && self.flip_coin() { let from_ty = &from_tys.choose(&mut self.rng).unwrap().from_type; self.gen_implicit_cast(from_ty, context) } else { - self.gen_expr(t, context) + self.gen_expr(t.as_exact(), context) } }) .collect(); let expr = if exprs.len() == 1 { - make_unary_op(func.func, &exprs[0]) + make_unary_op(func.name.as_scalar(), &exprs[0]) } else if exprs.len() == 2 { - make_bin_op(func.func, &exprs) + make_bin_op(func.name.as_scalar(), &exprs) } else { None }; - expr.or_else(|| make_general_expr(func.func, exprs)) + expr.or_else(|| make_general_expr(func.name.as_scalar(), exprs)) .unwrap_or_else(|| self.gen_simple_scalar(ret)) } } diff --git a/src/tests/sqlsmith/src/sql_gen/time_window.rs b/src/tests/sqlsmith/src/sql_gen/time_window.rs index d5fd7c8936b22..053af729ee526 100644 --- a/src/tests/sqlsmith/src/sql_gen/time_window.rs +++ b/src/tests/sqlsmith/src/sql_gen/time_window.rs @@ -46,7 +46,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { let time_col = time_cols.choose(&mut self.rng).unwrap(); let time_col = Expr::Identifier(time_col.name.as_str().into()); let args = create_args(vec![name, time_col, size]); - let relation = create_tvf("tumble", alias, args); + let relation = create_tvf("tumble", alias, args, false); let table = Table::new(table_name, schema.clone()); @@ -72,7 +72,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { let time_col = Expr::Identifier(time_col.name.as_str().into()); let args = create_args(vec![name, time_col, slide, size]); - let relation = create_tvf("hop", alias, args); + let relation = create_tvf("hop", alias, args, false); let table = Table::new(table_name, schema.clone()); @@ -120,11 +120,17 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { } /// Create a table view function. 
-fn create_tvf(name: &str, alias: TableAlias, args: Vec) -> TableFactor { +fn create_tvf( + name: &str, + alias: TableAlias, + args: Vec, + with_ordinality: bool, +) -> TableFactor { TableFactor::TableFunction { name: ObjectName(vec![name.into()]), alias: Some(alias), args, + with_ordinality, } } diff --git a/src/tests/sqlsmith/src/sql_gen/types.rs b/src/tests/sqlsmith/src/sql_gen/types.rs index 939d869744ea5..06d170e604ace 100644 --- a/src/tests/sqlsmith/src/sql_gen/types.rs +++ b/src/tests/sqlsmith/src/sql_gen/types.rs @@ -19,10 +19,9 @@ use std::sync::LazyLock; use itertools::Itertools; use risingwave_common::types::{DataType, DataTypeName}; -use risingwave_expr::agg::AggKind; -use risingwave_expr::sig::agg::{agg_func_sigs, AggFuncSig as RwAggFuncSig}; +use risingwave_expr::aggregate::AggKind; use risingwave_expr::sig::cast::{cast_sigs, CastContext, CastSig as RwCastSig}; -use risingwave_expr::sig::func::{func_sigs, FuncSign as RwFuncSig}; +use risingwave_expr::sig::{FuncSign, FUNCTION_REGISTRY}; use risingwave_frontend::expr::ExprType; use risingwave_sqlparser::ast::{BinaryOperator, DataType as AstDataType, StructField}; @@ -104,73 +103,16 @@ impl TryFrom for CastSig { } } -/// Provide internal `FuncSig` which can be used for `struct` and `list`. -#[derive(Clone)] -pub struct FuncSig { - pub func: ExprType, - pub inputs_type: Vec, - pub ret_type: DataType, -} - -impl TryFrom<&RwFuncSig> for FuncSig { - type Error = String; - - fn try_from(value: &RwFuncSig) -> Result { - if let Some(inputs_type) = value - .inputs_type - .iter() - .map(data_type_name_to_ast_data_type) - .collect() - && let Some(ret_type) = data_type_name_to_ast_data_type(&value.ret_type) - { - Ok(FuncSig { - inputs_type, - ret_type, - func: value.func, - }) - } else { - Err(format!("unsupported func sig: {:?}", value)) - } - } -} - -/// Provide internal `AggFuncSig` which can be used for `struct` and `list`. 
-#[derive(Clone)] -pub struct AggFuncSig { - pub func: AggKind, - pub inputs_type: Vec, - pub ret_type: DataType, -} - -impl TryFrom<&RwAggFuncSig> for AggFuncSig { - type Error = String; - - fn try_from(value: &RwAggFuncSig) -> Result { - if let Some(inputs_type) = value - .inputs_type - .iter() - .map(data_type_name_to_ast_data_type) - .collect() - && let Some(ret_type) = data_type_name_to_ast_data_type(&value.ret_type) - { - Ok(AggFuncSig { - inputs_type, - ret_type, - func: value.func, - }) - } else { - Err(format!("unsupported agg_func sig: {:?}", value)) - } - } -} - /// Function ban list. /// These functions should be generated eventually, by adding expression constraints. /// If we naively generate arguments for these functions, it will affect sqlsmith /// effectiveness, e.g. cause it to crash. static FUNC_BAN_LIST: LazyLock> = LazyLock::new(|| { [ - ExprType::Repeat, // FIXME: https://github.com/risingwavelabs/risingwave/issues/8003 + // FIXME: https://github.com/risingwavelabs/risingwave/issues/8003 + ExprType::Repeat, + // The format argument needs to be handled specially. It is still generated in `gen_special_func`. + ExprType::Decode, ] .into_iter() .collect() @@ -178,26 +120,38 @@ static FUNC_BAN_LIST: LazyLock> = LazyLock::new(|| { /// Table which maps functions' return types to possible function signatures. // ENABLE: https://github.com/risingwavelabs/risingwave/issues/5826 -pub(crate) static FUNC_TABLE: LazyLock>> = LazyLock::new(|| { - let mut funcs = HashMap::>::new(); - func_sigs() - .filter(|func| { - func.inputs_type - .iter() - .all(|t| *t != DataTypeName::Timestamptz) - && !FUNC_BAN_LIST.contains(&func.func) - && !func.deprecated // deprecated functions are not accepted by frontend - }) - .filter_map(|func| func.try_into().ok()) - .for_each(|func: FuncSig| funcs.entry(func.ret_type.clone()).or_default().push(func)); - funcs -}); +// TODO: Create a `SPECIAL_FUNC` table. 
+// Otherwise when we dump the function table, we won't include those functions in +// gen_special_func. +pub(crate) static FUNC_TABLE: LazyLock>> = + LazyLock::new(|| { + let mut funcs = HashMap::>::new(); + FUNCTION_REGISTRY + .iter_scalars() + .filter(|func| { + func.inputs_type.iter().all(|t| { + t.is_exact() + && t.as_exact() != &DataType::Timestamptz + && t.as_exact() != &DataType::Serial + }) && func.ret_type.is_exact() + && !FUNC_BAN_LIST.contains(&func.name.as_scalar()) + && !func.deprecated // deprecated functions are not accepted by frontend + }) + .for_each(|func| { + funcs + .entry(func.ret_type.as_exact().clone()) + .or_default() + .push(func) + }); + funcs + }); /// Set of invariant functions // ENABLE: https://github.com/risingwavelabs/risingwave/issues/5826 pub(crate) static INVARIANT_FUNC_SET: LazyLock> = LazyLock::new(|| { - func_sigs() - .map(|sig| sig.func) + FUNCTION_REGISTRY + .iter_scalars() + .map(|sig| sig.name.as_scalar()) .counts() .into_iter() .filter(|(_key, count)| *count == 1) @@ -207,14 +161,16 @@ pub(crate) static INVARIANT_FUNC_SET: LazyLock> = LazyLock::ne /// Table which maps aggregate functions' return types to possible function signatures. 
// ENABLE: https://github.com/risingwavelabs/risingwave/issues/5826 -pub(crate) static AGG_FUNC_TABLE: LazyLock>> = LazyLock::new( - || { - let mut funcs = HashMap::>::new(); - agg_func_sigs() +pub(crate) static AGG_FUNC_TABLE: LazyLock>> = + LazyLock::new(|| { + let mut funcs = HashMap::>::new(); + FUNCTION_REGISTRY + .iter_aggregates() .filter(|func| { func.inputs_type .iter() - .all(|t| *t != DataTypeName::Timestamptz) + .all(|t| t.is_exact() && t.as_exact() != &DataType::Timestamptz && t.as_exact() != &DataType::Serial) + && func.ret_type.is_exact() // Ignored functions && ![ AggKind::Sum0, // Used internally @@ -226,25 +182,23 @@ pub(crate) static AGG_FUNC_TABLE: LazyLock>> = AggKind::PercentileDisc, AggKind::Mode, ] - .contains(&func.func) + .contains(&func.name.as_aggregate()) // Exclude 2 phase agg global sum. // Sum(Int64) -> Int64. // Otherwise it conflicts with normal aggregation: // Sum(Int64) -> Decimal. // And sqlsmith will generate expressions with wrong types. - && if func.func == AggKind::Sum { - !(func.inputs_type[0] == DataTypeName::Int64 && func.ret_type == DataTypeName::Int64) + && if func.name.as_aggregate() == AggKind::Sum { + !(func.inputs_type[0].as_exact() == &DataType::Int64 && func.ret_type.as_exact() == &DataType::Int64) } else { true } }) - .filter_map(|func| func.try_into().ok()) - .for_each(|func: AggFuncSig| { - funcs.entry(func.ret_type.clone()).or_default().push(func) + .for_each(|func| { + funcs.entry(func.ret_type.as_exact().clone()).or_default().push(func) }); funcs - }, -); + }); /// Build a cast map from return types to viable cast-signatures. /// NOTE: We avoid cast from varchar to other datatypes apart from itself. 
@@ -299,28 +253,24 @@ pub(crate) static BINARY_INEQUALITY_OP_TABLE: LazyLock< HashMap<(DataType, DataType), Vec>, > = LazyLock::new(|| { let mut funcs = HashMap::<(DataType, DataType), Vec>::new(); - func_sigs() + FUNCTION_REGISTRY + .iter_scalars() .filter(|func| { - !FUNC_BAN_LIST.contains(&func.func) - && func.ret_type == DataTypeName::Boolean + !FUNC_BAN_LIST.contains(&func.name.as_scalar()) + && func.ret_type == DataType::Boolean.into() && func.inputs_type.len() == 2 && func .inputs_type .iter() - .all(|t| *t != DataTypeName::Timestamptz) + .all(|t| t.is_exact() && t.as_exact() != &DataType::Timestamptz) }) .filter_map(|func| { - let Some(lhs) = data_type_name_to_ast_data_type(&func.inputs_type[0]) else { - return None; - }; - let Some(rhs) = data_type_name_to_ast_data_type(&func.inputs_type[1]) else { - return None; - }; - let args = (lhs, rhs); - let Some(op) = expr_type_to_inequality_op(func.func) else { + let lhs = func.inputs_type[0].as_exact().clone(); + let rhs = func.inputs_type[1].as_exact().clone(); + let Some(op) = expr_type_to_inequality_op(func.name.as_scalar()) else { return None; }; - Some((args, op)) + Some(((lhs, rhs), op)) }) .for_each(|(args, op)| funcs.entry(args).or_default().push(op)); funcs diff --git a/src/tests/state_cleaning_test/Cargo.toml b/src/tests/state_cleaning_test/Cargo.toml index 2116e1d58659a..d9154309f4a99 100644 --- a/src/tests/state_cleaning_test/Cargo.toml +++ b/src/tests/state_cleaning_test/Cargo.toml @@ -25,7 +25,7 @@ serde_with = "3" tokio = { version = "0.2", package = "madsim-tokio" } tokio-postgres = "0.7" tokio-stream = { version = "0.1", features = ["fs"] } -toml = "0.7" +toml = "0.8" tracing = "0.1" [target.'cfg(not(madsim))'.dependencies] diff --git a/src/tests/state_cleaning_test/data/agg.toml b/src/tests/state_cleaning_test/data/agg.toml index 921908bc73fed..926fc4276b8b1 100644 --- a/src/tests/state_cleaning_test/data/agg.toml +++ b/src/tests/state_cleaning_test/data/agg.toml @@ -11,7 +11,7 @@ init_sqls = [ 
WATERMARK FOR created_at AS created_at - interval '9' second ) APPEND ONLY WITH ( connector = 'datagen', - rows_per_second = 100, + datagen.rows.per.second = 100, datagen.split.num = 16, fields.created_at.max_past_mode = 'relative', fields.created_at.max_past = '10s', diff --git a/src/tests/state_cleaning_test/data/join.toml b/src/tests/state_cleaning_test/data/join.toml index 76d5379231c99..f03600acdfdfd 100644 --- a/src/tests/state_cleaning_test/data/join.toml +++ b/src/tests/state_cleaning_test/data/join.toml @@ -10,7 +10,7 @@ init_sqls = [ WATERMARK FOR created_at AS created_at - interval '9' second ) APPEND ONLY WITH ( connector = 'datagen', - rows_per_second = 100, + datagen.rows.per.second = 100, datagen.split.num = 16, fields.created_at.max_past_mode = 'relative', fields.created_at.max_past = '10s', @@ -30,7 +30,7 @@ init_sqls = [ WATERMARK FOR created_at AS created_at - interval '9' second ) APPEND ONLY WITH ( connector = 'datagen', - rows_per_second = 200, + datagen.rows.per.second = 200, datagen.split.num = 16, fields.created_at.max_past_mode = 'relative', fields.created_at.max_past = '10s', diff --git a/src/tests/state_cleaning_test/data/temporal_filter.toml b/src/tests/state_cleaning_test/data/temporal_filter.toml index 043bb852a7667..6010dc0e12607 100644 --- a/src/tests/state_cleaning_test/data/temporal_filter.toml +++ b/src/tests/state_cleaning_test/data/temporal_filter.toml @@ -9,7 +9,7 @@ init_sqls = [ WATERMARK FOR created_at AS created_at - interval '9' second ) APPEND ONLY WITH ( connector = 'datagen', - rows_per_second = 200, + datagen.rows.per.second = 200, datagen.split.num = 16, fields.created_at.max_past_mode = 'relative', fields.created_at.max_past = '10s', diff --git a/src/udf/Cargo.toml b/src/udf/Cargo.toml index 94e541c269999..0f8e80c42612f 100644 --- a/src/udf/Cargo.toml +++ b/src/udf/Cargo.toml @@ -18,6 +18,7 @@ arrow-schema = { workspace = true } arrow-select = { workspace = true } base64 = "0.21" bytes = "1.4" +cfg-or-panic = "0.2" 
futures-util = "0.3.28" itertools = "0.11" risingwave_object_store = { workspace = true } diff --git a/src/udf/python/risingwave/__init__.py b/src/udf/python/risingwave/__init__.py index e69de29bb2d1d..73ba50c33cc16 100644 --- a/src/udf/python/risingwave/__init__.py +++ b/src/udf/python/risingwave/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/udf/python/risingwave/test_udf.py b/src/udf/python/risingwave/test_udf.py index d1507438e734f..e331e12f3a761 100644 --- a/src/udf/python/risingwave/test_udf.py +++ b/src/udf/python/risingwave/test_udf.py @@ -1,3 +1,17 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from multiprocessing import Process import pytest from risingwave.udf import udf, UdfServer, _to_data_type diff --git a/src/udf/python/risingwave/udf.py b/src/udf/python/risingwave/udf.py index 03dbe1a4224a3..758421b1db68d 100644 --- a/src/udf/python/risingwave/udf.py +++ b/src/udf/python/risingwave/udf.py @@ -1,3 +1,17 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from typing import * import pyarrow as pa import pyarrow.flight diff --git a/src/udf/python/risingwave/udf/health_check.py b/src/udf/python/risingwave/udf/health_check.py index f849029bbee36..57b8f0e6374e5 100644 --- a/src/udf/python/risingwave/udf/health_check.py +++ b/src/udf/python/risingwave/udf/health_check.py @@ -1,3 +1,17 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from pyarrow.flight import FlightClient import sys diff --git a/src/udf/src/error.rs b/src/udf/src/error.rs index f20816ee5b2c0..4a2ae18f61357 100644 --- a/src/udf/src/error.rs +++ b/src/udf/src/error.rs @@ -23,7 +23,7 @@ pub enum Error { #[error("failed to connect to UDF service: {0}")] Connect(#[from] tonic::transport::Error), - #[error("failed to check UDF: {0}")] + #[error("failed to send requests to UDF service: {0}")] Tonic(#[from] Box), #[error("failed to call UDF: {0}")] @@ -45,7 +45,7 @@ pub enum Error { ServiceError(String), } -static_assertions::const_assert_eq!(std::mem::size_of::(), 32); +static_assertions::const_assert_eq!(std::mem::size_of::(), 40); impl From for Error { fn from(status: tonic::Status) -> Self { diff --git a/src/udf/src/external.rs b/src/udf/src/external.rs index 585adc7ebec5b..e77b96f2bdab4 100644 --- a/src/udf/src/external.rs +++ b/src/udf/src/external.rs @@ -19,6 +19,7 @@ use arrow_flight::error::FlightError; use arrow_flight::flight_service_client::FlightServiceClient; use arrow_flight::{FlightData, FlightDescriptor}; use arrow_schema::Schema; +use cfg_or_panic::cfg_or_panic; use futures_util::{stream, Stream, StreamExt, TryStreamExt}; use tonic::transport::Channel; @@ -30,7 +31,8 @@ pub struct ArrowFlightUdfClient { client: FlightServiceClient, } -#[cfg(not(madsim))] +// TODO: support UDF in simulation +#[cfg_or_panic(not(madsim))] impl ArrowFlightUdfClient { /// Connect to a UDF service. pub async fn connect(addr: &str) -> Result { @@ -38,6 +40,13 @@ impl ArrowFlightUdfClient { Ok(Self { client }) } + /// Connect to a UDF service lazily (i.e. only when the first request is sent). + pub fn connect_lazy(addr: &str) -> Result { + let conn = tonic::transport::Endpoint::new(addr.to_string())?.connect_lazy(); + let client = FlightServiceClient::new(conn); + Ok(Self { client }) + } + /// Check if the function is available and the schema is match. 
pub async fn check(&self, id: &str, args: &Schema, returns: &Schema) -> Result<()> { let descriptor = FlightDescriptor::new_path(vec![id.into()]); @@ -98,6 +107,7 @@ impl ArrowFlightUdfClient { } /// Call a function with streaming input and output. + #[panic_return = "Result>"] pub async fn call_stream( &self, id: &str, @@ -129,35 +139,6 @@ impl ArrowFlightUdfClient { } } -// TODO: support UDF in simulation -#[cfg(madsim)] -impl ArrowFlightUdfClient { - /// Connect to a UDF service. - pub async fn connect(_addr: &str) -> Result { - panic!("UDF is not supported in simulation yet") - } - - /// Check if the function is available. - pub async fn check(&self, _id: &str, _args: &Schema, _returns: &Schema) -> Result<()> { - panic!("UDF is not supported in simulation yet") - } - - /// Call a function. - pub async fn call(&self, _id: &str, _input: RecordBatch) -> Result { - panic!("UDF is not supported in simulation yet") - } - - /// Call a function with streaming input and output. - pub async fn call_stream( - &self, - _id: &str, - _inputs: impl Stream + Send + 'static, - ) -> Result> + Send + 'static> { - panic!("UDF is not supported in simulation yet"); - Ok(stream::empty()) - } -} - /// Check if two list of data types match, ignoring field names. fn data_types_match(a: &[&arrow_schema::DataType], b: &[&arrow_schema::DataType]) -> bool { if a.len() != b.len() { diff --git a/src/udf/src/wasm.rs b/src/udf/src/wasm.rs index 3a241e7d19933..06c84c9baf7db 100644 --- a/src/udf/src/wasm.rs +++ b/src/udf/src/wasm.rs @@ -231,7 +231,7 @@ impl WasmEngine { identifier: &str, ) -> WasmUdfResult { let object_store = get_wasm_storage(wasm_storage_url).await?; - let serialized_component = object_store.read(&compiled_path(identifier), None).await?; + let serialized_component = object_store.read(&compiled_path(identifier), ..).await?; // This is fast. 
let component = unsafe { diff --git a/src/utils/pgwire/Cargo.toml b/src/utils/pgwire/Cargo.toml index cfa82c1393de8..c6d46e356518a 100644 --- a/src/utils/pgwire/Cargo.toml +++ b/src/utils/pgwire/Cargo.toml @@ -16,7 +16,7 @@ normal = ["workspace-hack"] [dependencies] anyhow = { version = "1.0", default-features = false } -byteorder = "1.4" +byteorder = "1.5" bytes = "1" futures = { version = "0.3", default-features = false, features = ["alloc"] } itertools = "0.11" diff --git a/src/utils/pgwire/src/lib.rs b/src/utils/pgwire/src/lib.rs index 1cda373ee9568..84a17d9907879 100644 --- a/src/utils/pgwire/src/lib.rs +++ b/src/utils/pgwire/src/lib.rs @@ -17,8 +17,6 @@ #![feature(result_option_inspect)] #![feature(iterator_try_collect)] #![feature(trusted_len)] -#![feature(async_fn_in_trait)] -#![feature(return_position_impl_trait_in_trait)] #![feature(lazy_cell)] #![expect(clippy::doc_markdown, reason = "FIXME: later")] diff --git a/src/utils/pgwire/src/pg_message.rs b/src/utils/pgwire/src/pg_message.rs index 408330a2df6ae..b28d09116f94f 100644 --- a/src/utils/pgwire/src/pg_message.rs +++ b/src/utils/pgwire/src/pg_message.rs @@ -451,8 +451,8 @@ impl<'a> BeMessage<'a> { // Parameter names and values are passed as null-terminated strings let iov = &mut [name, b"\0", value, b"\0"].map(IoSlice::new); - let mut buffer = [0u8; 64]; // this should be enough - let cnt = buffer.as_mut().write_vectored(iov).unwrap(); + let mut buffer = vec![]; + let cnt = buffer.write_vectored(iov).unwrap(); buf.put_u8(b'S'); write_body(buf, |stream| { diff --git a/src/utils/pgwire/src/pg_response.rs b/src/utils/pgwire/src/pg_response.rs index 29ea77f83b71b..eeec929732f50 100644 --- a/src/utils/pgwire/src/pg_response.rs +++ b/src/utils/pgwire/src/pg_response.rs @@ -92,6 +92,7 @@ pub enum StatementType { ROLLBACK, SET_TRANSACTION, CANCEL_COMMAND, + WAIT, } impl std::fmt::Display for StatementType { @@ -278,6 +279,7 @@ impl StatementType { }, Statement::Explain { .. 
} => Ok(StatementType::EXPLAIN), Statement::Flush => Ok(StatementType::FLUSH), + Statement::Wait => Ok(StatementType::WAIT), _ => Err("unsupported statement type".to_string()), } } diff --git a/src/utils/runtime/Cargo.toml b/src/utils/runtime/Cargo.toml index 6a11fce54ffb6..8bd4e49d808a9 100644 --- a/src/utils/runtime/Cargo.toml +++ b/src/utils/runtime/Cargo.toml @@ -16,22 +16,18 @@ normal = ["workspace-hack"] [dependencies] await-tree = { workspace = true } -chrono = { version = "0.4", default-features = false, features = [ - "clock", - "std", -] } console = "0.15" -console-subscriber = "0.1.10" +console-subscriber = "0.2.0" either = "1" futures = { version = "0.3", default-features = false, features = ["alloc"] } hostname = "0.3" opentelemetry-otlp = { version = "0.13" } opentelemetry-semantic-conventions = "0.12" parking_lot = { version = "0.12", features = ["deadlock_detection"] } -pprof = { version = "0.12", features = ["flamegraph"] } -prometheus = { version = "0.13" } +pprof = { version = "0.13", features = ["flamegraph"] } risingwave_common = { workspace = true } risingwave_variables = { workspace = true } +rlimit = "0.10" time = { version = "0.3", features = ["formatting", "local-offset"] } tokio = { version = "0.2", package = "madsim-tokio", features = [ "rt", diff --git a/src/utils/runtime/src/logger.rs b/src/utils/runtime/src/logger.rs index ce038de71446c..a86c585c0a3fd 100644 --- a/src/utils/runtime/src/logger.rs +++ b/src/utils/runtime/src/logger.rs @@ -28,29 +28,6 @@ use tracing_subscriber::{filter, EnvFilter}; const PGWIRE_QUERY_LOG: &str = "pgwire_query_log"; const SLOW_QUERY_LOG: &str = "risingwave_frontend_slow_query_log"; -/// Configure log targets for some `RisingWave` crates. -/// -/// Other RisingWave crates will follow the default level (`DEBUG` or `INFO` according to -/// the `debug_assertions` and `is_ci` flag). 
-fn configure_risingwave_targets_fmt(targets: filter::Targets) -> filter::Targets { - targets - // force a lower level for important logs - .with_target("risingwave_stream", Level::DEBUG) - .with_target("risingwave_storage", Level::DEBUG) - // force a higher level for noisy logs - .with_target("risingwave_sqlparser", Level::INFO) - .with_target("pgwire", Level::INFO) - .with_target(PGWIRE_QUERY_LOG, Level::OFF) - // force a higher level for foyer logs - .with_target("foyer", Level::WARN) - .with_target("foyer_common", Level::WARN) - .with_target("foyer_intrusive", Level::WARN) - .with_target("foyer_memory", Level::WARN) - .with_target("foyer_storage", Level::WARN) - // disable events that are too verbose - .with_target("events", Level::ERROR) -} - pub struct LoggerSettings { /// The name of the service. name: String, @@ -122,9 +99,12 @@ impl LoggerSettings { /// Overrides default level and tracing targets of the fmt layer (formatting and /// logging to `stdout` or `stderr`). /// +/// Note that only verbosity levels below or equal to `DEBUG` are effective in +/// release builds. +/// /// e.g., /// ```bash -/// RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" +/// RUST_LOG="info,risingwave_stream=debug,events=debug" /// ``` /// /// ### `RW_QUERY_LOG_PATH` @@ -159,8 +139,20 @@ pub fn init_risingwave_logger(settings: LoggerSettings) { // Default filter for logging to stdout and tracing. let default_filter = { - let mut filter = filter::Targets::new() - // force a higher level for noisy logs in 3rd-party crates + let mut filter = filter::Targets::new(); + + // Configure levels for some RisingWave crates. + // Other RisingWave crates like `stream` and `storage` will follow the default level. 
+ filter = filter + .with_target("risingwave_sqlparser", Level::INFO) + .with_target("pgwire", Level::INFO) + .with_target(PGWIRE_QUERY_LOG, Level::OFF) + // debug-purposed events are disabled unless `RUST_LOG` overrides + .with_target("events", Level::OFF); + + // Configure levels for external crates. + filter = filter + .with_target("foyer", Level::WARN) .with_target("aws_sdk_ec2", Level::INFO) .with_target("aws_sdk_s3", Level::INFO) .with_target("aws_config", Level::WARN) @@ -177,10 +169,8 @@ pub fn init_risingwave_logger(settings: LoggerSettings) { .with_target("cranelift", Level::INFO) .with_target("wasmtime", Level::INFO); - filter = configure_risingwave_targets_fmt(filter); - - // For all other crates - filter = filter.with_default(match Deployment::current() { + // For all other crates, apply default level depending on the deployment and `debug_assertions` flag. + let default_level = match deployment { Deployment::Ci => Level::INFO, _ => { if cfg!(debug_assertions) { @@ -189,22 +179,23 @@ pub fn init_risingwave_logger(settings: LoggerSettings) { Level::INFO } } - }); + }; + filter = filter.with_default(default_level); - // Overrides from settings + // Overrides from settings. filter = filter.with_targets(settings.targets); if let Some(default_level) = settings.default_level { filter = filter.with_default(default_level); } - // Overrides from env var + // Overrides from env var. 
if let Ok(rust_log) = std::env::var(EnvFilter::DEFAULT_ENV) && !rust_log.is_empty() { - let rust_log_targets: Targets = rust_log.parse().expect("failed to parse `RUST_LOG`"); - if let Some(default_level) = rust_log_targets.default_level() { - filter = filter.with_default(default_level); - } - filter = filter.with_targets(rust_log_targets) - }; + let rust_log_targets: Targets = rust_log.parse().expect("failed to parse `RUST_LOG`"); + if let Some(default_level) = rust_log_targets.default_level() { + filter = filter.with_default(default_level); + } + filter = filter.with_targets(rust_log_targets) + }; filter }; diff --git a/src/utils/runtime/src/panic_hook.rs b/src/utils/runtime/src/panic_hook.rs index 992126e196f58..848e7df8509c7 100644 --- a/src/utils/runtime/src/panic_hook.rs +++ b/src/utils/runtime/src/panic_hook.rs @@ -15,6 +15,10 @@ /// Set panic hook to abort the process if we're not catching unwind, without losing the information /// of stack trace and await-tree. pub fn set_panic_hook() { + if let Ok(limit) = rlimit::Resource::CORE.get_soft() && limit > 0 { + tracing::info!(limit, "coredump on panic is likely to be enabled"); + }; + std::panic::update_hook(|default_hook, info| { default_hook(info); diff --git a/src/utils/workspace-config/Cargo.toml b/src/utils/workspace-config/Cargo.toml index d8b2dd800ab1b..df70a2c6d0054 100644 --- a/src/utils/workspace-config/Cargo.toml +++ b/src/utils/workspace-config/Cargo.toml @@ -25,5 +25,10 @@ zstd-sys = { version = "2", optional = true, default-features = false, features # workspace-hack = { path = "../../workspace-hack" } # Don't add workspace-hack into this crate! +# FIXME(xxchan): This is a temporary fix due to how cargo and hakari works. See related PR for more details. +# We will revisit how to handle workspace-hack and build-dependency issues later. 
+[build-dependencies] +openssl-sys = { version = "=0.9.92", optional = true, features = ["vendored"] } + [lints] workspace = true diff --git a/src/workspace-hack/Cargo.toml b/src/workspace-hack/Cargo.toml index e492244bee9f5..d21f92780d053 100644 --- a/src/workspace-hack/Cargo.toml +++ b/src/workspace-hack/Cargo.toml @@ -19,24 +19,28 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] ahash = { version = "0.8" } +allocator-api2 = { version = "0.2", default-features = false, features = ["alloc", "nightly"] } anyhow = { version = "1", features = ["backtrace"] } +async-std = { version = "1", features = ["attributes", "tokio1"] } aws-credential-types = { version = "0.55", default-features = false, features = ["hardcoded-credentials"] } aws-sdk-s3 = { version = "0.28", features = ["native-tls"] } aws-smithy-client = { version = "0.55", default-features = false, features = ["native-tls", "rustls"] } base64 = { version = "0.21", features = ["alloc"] } -bitflags = { version = "2", default-features = false, features = ["std"] } -byteorder = { version = "1", features = ["i128"] } +bit-vec = { version = "0.6" } +bitflags = { version = "2", default-features = false, features = ["serde", "std"] } +byteorder = { version = "1" } bytes = { version = "1", features = ["serde"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } +chrono = { version = "0.4", features = ["serde"] } clap = { version = "4", features = ["cargo", "derive", "env"] } clap_builder = { version = "4", default-features = false, features = ["cargo", "color", "env", "help", "std", "suggestions", "usage"] } -combine = { version = "4" } +combine = { version = "4", features = ["tokio"] } crc32fast = { version = "1" } crossbeam-epoch = { version = "0.9" } +crossbeam-queue = { version = "0.3" } crossbeam-utils = { version = "0.8" } -deranged = { version = "0.3", default-features = false, features = ["serde", "std"] } -digest = { version = "0.10", features = ["mac", "std"] } -either = { version = "1" } 
+deranged = { version = "0.3", default-features = false, features = ["powerfmt", "serde", "std"] } +digest = { version = "0.10", features = ["mac", "oid", "std"] } +either = { version = "1", features = ["serde"] } fail = { version = "0.5", default-features = false, features = ["failpoints"] } fallible-iterator = { version = "0.2" } fixedbitset = { version = "0.4" } @@ -54,7 +58,10 @@ hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["nightly", "raw"] } hyper = { version = "0.14", features = ["full"] } indexmap = { version = "1", default-features = false, features = ["serde-1", "std"] } -itertools = { version = "0.10" } +itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10" } +itertools-a6292c17cd707f01 = { package = "itertools", version = "0.11" } +jni = { version = "0.21", features = ["invocation"] } +lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } lexical-core = { version = "0.8", features = ["format"] } lexical-parse-float = { version = "0.8", default-features = false, features = ["format", "std"] } lexical-parse-integer = { version = "0.8", default-features = false, features = ["format", "std"] } @@ -63,17 +70,19 @@ lexical-write-float = { version = "0.8", default-features = false, features = [" lexical-write-integer = { version = "0.8", default-features = false, features = ["format", "std"] } libc = { version = "0.2", features = ["extra_traits"] } lock_api = { version = "0.4", features = ["arc_lock"] } -log = { version = "0.4", default-features = false, features = ["std"] } -madsim-rdkafka = { git = "https://github.com/madsim-rs/madsim.git", rev = "bb8f063", features = ["cmake-build", "gssapi", "ssl-vendored", "zstd"] } +log = { version = "0.4", default-features = false, features = ["kv_unstable", "std"] } +madsim-rdkafka = { version = "0.3", features = ["cmake-build", "gssapi", 
"ssl-vendored", "zstd"] } madsim-tokio = { version = "0.2", default-features = false, features = ["fs", "io-util", "macros", "net", "process", "rt", "rt-multi-thread", "signal", "sync", "time", "tracing"] } +md-5 = { version = "0.10" } mio = { version = "0.8", features = ["net", "os-ext"] } -multimap = { version = "0.8" } nom = { version = "7" } num-bigint = { version = "0.4" } num-integer = { version = "0.1", features = ["i128"] } +num-iter = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128", "libm"] } opentelemetry_api = { version = "0.20", features = ["logs", "metrics"] } opentelemetry_sdk = { version = "0.20", features = ["logs", "metrics"] } +ordered-float = { version = "3" } parking_lot = { version = "0.12", features = ["arc_lock", "deadlock_detection"] } parking_lot_core = { version = "0.9", default-features = false, features = ["deadlock_detection"] } petgraph = { version = "0.6" } @@ -81,34 +90,51 @@ phf = { version = "0.11", features = ["uncased"] } phf_shared = { version = "0.11", features = ["uncased"] } postgres-types = { version = "0.2", default-features = false, features = ["derive", "with-chrono-0_4", "with-serde_json-1"] } prometheus = { version = "0.13", features = ["process"] } -prost = { version = "0.11", features = ["no-recursion-limit"] } +prost-5ef9efb8ec2df382 = { package = "prost", version = "0.12", features = ["no-recursion-limit"] } +prost-a6292c17cd707f01 = { package = "prost", version = "0.11" } +prost-types = { version = "0.12" } rand = { version = "0.8", features = ["small_rng"] } rand_chacha = { version = "0.3" } rand_core = { version = "0.6", default-features = false, features = ["std"] } +redis = { version = "0.23", features = ["async-std-comp", "tokio-comp"] } regex = { version = "1" } -regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } 
-regex-syntax = { version = "0.7" } +regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.8" } reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] } ring = { version = "0.16", features = ["std"] } rust_decimal = { version = "1", features = ["db-postgres", "maths"] } rustc-hash = { version = "1" } -rustix = { version = "0.37", features = ["fs", "net"] } +rustix = { version = "0.38", features = ["fs"] } +rustls = { version = "0.21" } scopeguard = { version = "1" } +sea-orm = { version = "0.12", features = ["runtime-tokio-native-tls", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite"] } +sea-query = { version = "0.30", default-features = false, features = ["backend-mysql", "backend-postgres", "backend-sqlite", "derive", "hashable-value", "postgres-array", "thread-safe", "with-bigdecimal", "with-chrono", "with-json", "with-rust_decimal", "with-time", "with-uuid"] } +sea-query-binder = { version = "0.5", default-features = false, features = ["postgres-array", "runtime-tokio-native-tls", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", "with-bigdecimal", "with-chrono", "with-json", "with-rust_decimal", "with-time", "with-uuid"] } serde = { version = "1", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1", features = ["alloc"] } +serde_json = { version = "1", features = ["alloc", "raw_value"] } serde_with = { version = "3", features = ["json"] } +sha1 = { version = "0.10" } +sha2 = { version = "0.10" } +signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } smallvec = { version = "1", default-features = false, features = ["serde", "union", "write"] } +sqlx = { version = "0.7", default-features = false, features = ["bigdecimal", "chrono", "json", "mysql", "postgres", "runtime-tokio-native-tls", "rust_decimal", "sqlite", "time", "uuid"] } +sqlx-core = { 
version = "0.7", features = ["_rt-tokio", "_tls-native-tls", "bigdecimal", "chrono", "json", "migrate", "offline", "rust_decimal", "time", "uuid"] } +sqlx-mysql = { version = "0.7", default-features = false, features = ["bigdecimal", "chrono", "json", "rust_decimal", "time", "uuid"] } +sqlx-postgres = { version = "0.7", default-features = false, features = ["bigdecimal", "chrono", "json", "rust_decimal", "time", "uuid"] } +sqlx-sqlite = { version = "0.7", default-features = false, features = ["chrono", "json", "time", "uuid"] } stable_deref_trait = { version = "1" } +strum = { version = "0.25", features = ["derive"] } subtle = { version = "2" } time = { version = "0.3", features = ["local-offset", "macros", "serde-well-known"] } tinyvec = { version = "1", features = ["alloc", "grab_spare_slice", "rustc_1_55"] } tokio = { version = "1", features = ["full", "stats", "tracing"] } -tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "4538cd6", features = ["with-chrono-0_4"] } +tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "ac00d88", features = ["with-chrono-0_4"] } tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "fe39bb8e", features = ["fs", "net"] } tokio-util = { version = "0.7", features = ["codec", "io"] } toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } toml_edit = { version = "0.19", features = ["serde"] } -tonic = { version = "0.9", features = ["gzip", "tls-webpki-roots"] } +tonic-274715c4dabd11b0 = { package = "tonic", version = "0.9", features = ["gzip", "tls-webpki-roots"] } +tonic-93f6ce9d446188ac = { package = "tonic", version = "0.10" } tower = { version = "0.4", features = ["balance", "buffer", "filter", "limit", "load-shed", "retry", "timeout", "util"] } tracing = { version = "0.1", features = ["log"] } tracing-core = { version = "0.1" } @@ -117,114 +143,58 @@ unicode-bidi = { version = "0.3" } unicode-normalization = { version = 
"0.1" } url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["fast-rng", "serde", "v4"] } -zeroize = { version = "1", features = ["zeroize_derive"] } +whoami = { version = "1" } [build-dependencies] ahash = { version = "0.8" } +allocator-api2 = { version = "0.2", default-features = false, features = ["alloc", "nightly"] } anyhow = { version = "1", features = ["backtrace"] } auto_enums = { version = "0.8", features = ["futures03"] } -aws-credential-types = { version = "0.55", default-features = false, features = ["hardcoded-credentials"] } -aws-sdk-s3 = { version = "0.28", features = ["native-tls"] } -aws-smithy-client = { version = "0.55", default-features = false, features = ["native-tls", "rustls"] } -base64 = { version = "0.21", features = ["alloc"] } -bitflags = { version = "2", default-features = false, features = ["std"] } -byteorder = { version = "1", features = ["i128"] } +bitflags = { version = "2", default-features = false, features = ["serde", "std"] } bytes = { version = "1", features = ["serde"] } cc = { version = "1", default-features = false, features = ["parallel"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } -clap = { version = "4", features = ["cargo", "derive", "env"] } -clap_builder = { version = "4", default-features = false, features = ["cargo", "color", "env", "help", "std", "suggestions", "usage"] } -combine = { version = "4" } -crc32fast = { version = "1" } -crossbeam-epoch = { version = "0.9" } -crossbeam-utils = { version = "0.8" } -deranged = { version = "0.3", default-features = false, features = ["serde", "std"] } -digest = { version = "0.10", features = ["mac", "std"] } -either = { version = "1" } -fail = { version = "0.5", default-features = false, features = ["failpoints"] } -fallible-iterator = { version = "0.2" } +deranged = { version = "0.3", default-features = false, features = ["powerfmt", "serde", "std"] } +either = { version = "1", features = ["serde"] } fixedbitset = { version = 
"0.4" } -flate2 = { version = "1", features = ["zlib"] } frunk_core = { version = "0.4", default-features = false, features = ["std"] } -futures = { version = "0.3" } -futures-channel = { version = "0.3", features = ["sink"] } -futures-core = { version = "0.3" } -futures-io = { version = "0.3" } -futures-sink = { version = "0.3" } -futures-task = { version = "0.3" } -futures-util = { version = "0.3", features = ["channel", "io", "sink"] } hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["nightly", "raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["raw"] } hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["nightly", "raw"] } -hyper = { version = "0.14", features = ["full"] } indexmap = { version = "1", default-features = false, features = ["serde-1", "std"] } -itertools = { version = "0.10" } -lexical-core = { version = "0.8", features = ["format"] } -lexical-parse-float = { version = "0.8", default-features = false, features = ["format", "std"] } -lexical-parse-integer = { version = "0.8", default-features = false, features = ["format", "std"] } -lexical-util = { version = "0.8", default-features = false, features = ["format", "parse-floats", "parse-integers", "std", "write-floats", "write-integers"] } -lexical-write-float = { version = "0.8", default-features = false, features = ["format", "std"] } -lexical-write-integer = { version = "0.8", default-features = false, features = ["format", "std"] } +itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10" } +itertools-a6292c17cd707f01 = { package = "itertools", version = "0.11" } +lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } libc = { version = "0.2", features = ["extra_traits"] } -lock_api = { version = "0.4", features = ["arc_lock"] } -log = { version = "0.4", default-features = false, features = ["std"] } -madsim-rdkafka = { git = 
"https://github.com/madsim-rs/madsim.git", rev = "bb8f063", features = ["cmake-build", "gssapi", "ssl-vendored", "zstd"] } -madsim-tokio = { version = "0.2", default-features = false, features = ["fs", "io-util", "macros", "net", "process", "rt", "rt-multi-thread", "signal", "sync", "time", "tracing"] } -mio = { version = "0.8", features = ["net", "os-ext"] } -multimap = { version = "0.8" } +log = { version = "0.4", default-features = false, features = ["kv_unstable", "std"] } nom = { version = "7" } num-bigint = { version = "0.4" } num-integer = { version = "0.1", features = ["i128"] } num-traits = { version = "0.2", features = ["i128", "libm"] } -opentelemetry_api = { version = "0.20", features = ["logs", "metrics"] } -opentelemetry_sdk = { version = "0.20", features = ["logs", "metrics"] } -parking_lot = { version = "0.12", features = ["arc_lock", "deadlock_detection"] } -parking_lot_core = { version = "0.9", default-features = false, features = ["deadlock_detection"] } petgraph = { version = "0.6" } phf = { version = "0.11", features = ["uncased"] } phf_shared = { version = "0.11", features = ["uncased"] } -postgres-types = { version = "0.2", default-features = false, features = ["derive", "with-chrono-0_4", "with-serde_json-1"] } proc-macro2 = { version = "1", features = ["span-locations"] } -prometheus = { version = "0.13", features = ["process"] } -prost = { version = "0.11", features = ["no-recursion-limit"] } +prost-5ef9efb8ec2df382 = { package = "prost", version = "0.12", features = ["no-recursion-limit"] } +prost-a6292c17cd707f01 = { package = "prost", version = "0.11" } +prost-types = { version = "0.12" } rand = { version = "0.8", features = ["small_rng"] } rand_chacha = { version = "0.3" } rand_core = { version = "0.6", default-features = false, features = ["std"] } regex = { version = "1" } -regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", 
"unicode"] } -regex-syntax = { version = "0.7" } -reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] } -ring = { version = "0.16", features = ["std"] } -rust_decimal = { version = "1", features = ["db-postgres", "maths"] } +regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.8" } rustc-hash = { version = "1" } -rustix = { version = "0.37", features = ["fs", "net"] } -scopeguard = { version = "1" } +rustix = { version = "0.38", features = ["fs"] } serde = { version = "1", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1", features = ["alloc"] } -serde_with = { version = "3", features = ["json"] } -smallvec = { version = "1", default-features = false, features = ["serde", "union", "write"] } -stable_deref_trait = { version = "1" } -subtle = { version = "2" } +serde_json = { version = "1", features = ["alloc", "raw_value"] } syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "full", "visit", "visit-mut"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3", features = ["local-offset", "macros", "serde-well-known"] } time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing", "serde"] } tinyvec = { version = "1", features = ["alloc", "grab_spare_slice", "rustc_1_55"] } -tokio = { version = "1", features = ["full", "stats", "tracing"] } -tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "4538cd6", features = ["with-chrono-0_4"] } -tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "fe39bb8e", features = ["fs", "net"] } -tokio-util = { version = "0.7", features = ["codec", "io"] } toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } 
toml_edit = { version = "0.19", features = ["serde"] } -tonic = { version = "0.9", features = ["gzip", "tls-webpki-roots"] } -tower = { version = "0.4", features = ["balance", "buffer", "filter", "limit", "load-shed", "retry", "timeout", "util"] } -tracing = { version = "0.1", features = ["log"] } -tracing-core = { version = "0.1" } -tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "local-time", "parking_lot"] } unicode-bidi = { version = "0.3" } unicode-normalization = { version = "0.1" } url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["fast-rng", "serde", "v4"] } -zeroize = { version = "1", features = ["zeroize_derive"] } ### END HAKARI SECTION